// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7038 style Level 1 interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Author: Kevin Cernekee
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/syscore_ops.h>

#define IRQS_PER_WORD		32
#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
#define MAX_WORDS		8

struct bcm7038_l1_cpu;

struct bcm7038_l1_chip {
	raw_spinlock_t		lock;
	unsigned int		n_words;
	struct irq_domain	*domain;
	struct bcm7038_l1_cpu	*cpus[NR_CPUS];
#ifdef CONFIG_PM_SLEEP
	struct list_head	list;
	u32			wake_mask[MAX_WORDS];
#endif
	u32			irq_fwd_mask[MAX_WORDS];
	u8			affinity[MAX_WORDS * IRQS_PER_WORD];
};

struct bcm7038_l1_cpu {
	void __iomem		*map_base;
	u32			mask_cache[];
};

/*
 * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
 *
 * 7038:
 *   0x1000_1400: W0_STATUS
 *   0x1000_1404: W1_STATUS
 *   0x1000_1408: W0_MASK_STATUS
 *   0x1000_140c: W1_MASK_STATUS
 *   0x1000_1410: W0_MASK_SET
 *   0x1000_1414: W1_MASK_SET
 *   0x1000_1418: W0_MASK_CLEAR
 *   0x1000_141c: W1_MASK_CLEAR
 *
 * 7445:
 *   0xf03e_1500: W0_STATUS
 *   0xf03e_1504: W1_STATUS
 *   0xf03e_1508: W2_STATUS
 *   0xf03e_150c: W3_STATUS
 *   0xf03e_1510: W4_STATUS
 *   0xf03e_1514: W0_MASK_STATUS
 *   0xf03e_1518: W1_MASK_STATUS
 *   [...]
 */

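/*
 * Worked example of the offset helpers below, assuming the two-word 7038
 * layout above (n_words == 2, block base 0x1000_1400): reg_mask_set(intc, 1)
 * returns (2 * 2 + 1) * sizeof(u32) = 0x14, i.e. W1_MASK_SET at 0x1000_1414.
 */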
static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
				      unsigned int word)
{
	return (0 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
					   unsigned int word)
{
	return (1 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
					unsigned int word)
{
	return (2 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
					unsigned int word)
{
	return (3 * intc->n_words + word) * sizeof(u32);
}

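/*
 * MMIO accessors: big-endian MIPS builds access the registers as big-endian
 * words (ioread32be/iowrite32be); every other configuration uses plain
 * readl()/writel().
 */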
static inline u32 l1_readl(void __iomem *reg)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return ioread32be(reg);
	else
		return readl(reg);
}

static inline void l1_writel(u32 val, void __iomem *reg)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		iowrite32be(val, reg);
	else
		writel(val, reg);
}

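/*
 * Chained handler for the parent interrupt: for each status word of the
 * register block owned by the handling CPU, fold in mask_cache to drop
 * disabled sources and forward every pending bit to the IRQ domain.
 */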
static void bcm7038_l1_irq_handle(struct irq_desc *desc)
{
	struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
	struct bcm7038_l1_cpu *cpu;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int idx;

#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
	cpu = intc->cpus[0];
#endif

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < intc->n_words; idx++) {
		int base = idx * IRQS_PER_WORD;
		unsigned long pending, flags;
		int hwirq;

		raw_spin_lock_irqsave(&intc->lock, flags);
		pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
			  ~cpu->mask_cache[idx];
		raw_spin_unlock_irqrestore(&intc->lock, flags);

		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
			generic_handle_domain_irq(intc->domain, base + hwirq);
	}

	chained_irq_exit(chip, desc);
}

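/*
 * Low-level helpers, called with intc->lock held: update the given CPU's
 * mask_cache word and write the bit to MASK_CLEAR (unmask) or MASK_SET
 * (mask) in that CPU's register block.
 */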
static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
			reg_mask_clr(intc, word));
}

static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	intc->cpus[cpu_idx]->mask_cache[word] |= mask;
	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
			reg_mask_set(intc, word));
}

static void bcm7038_l1_unmask(struct irq_data *d)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}

static void bcm7038_l1_mask(struct irq_data *d)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}

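/*
 * Move an interrupt to a new CPU: mask it in the register block of the CPU
 * that currently owns it, record the new owner, and unmask it there only if
 * it was enabled. Each interrupt targets exactly one CPU (see
 * irqd_set_single_target() in bcm7038_l1_map()).
 */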
#if defined(CONFIG_MIPS) && defined(CONFIG_SMP)
static int bcm7038_l1_set_affinity(struct irq_data *d,
				   const struct cpumask *dest,
				   bool force)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	irq_hw_number_t hw = d->hwirq;
	u32 word = hw / IRQS_PER_WORD;
	u32 mask = BIT(hw % IRQS_PER_WORD);
	unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
	bool was_disabled;

	raw_spin_lock_irqsave(&intc->lock, flags);

	was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
			  mask);
	__bcm7038_l1_mask(d, intc->affinity[hw]);
	intc->affinity[hw] = first_cpu;
	if (!was_disabled)
		__bcm7038_l1_unmask(d, first_cpu);

	raw_spin_unlock_irqrestore(&intc->lock, flags);
	irq_data_update_effective_affinity(d, cpumask_of(first_cpu));

	return 0;
}
#endif

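/*
 * Set up one per-CPU copy of the controller: map "reg" window idx, size the
 * controller (n_words) from that window, leave any bits listed in the
 * optional brcm,int-fwd-mask unmasked (those hwirqs are refused in
 * bcm7038_l1_map()), mask everything else, and install
 * bcm7038_l1_irq_handle() as the chained handler for parent interrupt idx.
 */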
static int __init bcm7038_l1_init_one(struct device_node *dn,
				      unsigned int idx,
				      struct bcm7038_l1_chip *intc)
{
	struct resource res;
	resource_size_t sz;
	struct bcm7038_l1_cpu *cpu;
	unsigned int i, n_words, parent_irq;
	int ret;

	if (of_address_to_resource(dn, idx, &res))
		return -EINVAL;
	sz = resource_size(&res);
	n_words = sz / REG_BYTES_PER_IRQ_WORD;

	if (n_words > MAX_WORDS)
		return -EINVAL;
	else if (!intc->n_words)
		intc->n_words = n_words;
	else if (intc->n_words != n_words)
		return -EINVAL;

	ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
					 intc->irq_fwd_mask, n_words);
	if (ret != 0 && ret != -EINVAL) {
		/* property exists but has the wrong number of words */
		pr_err("invalid brcm,int-fwd-mask property\n");
		return -EINVAL;
	}

	cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, mask_cache, n_words),
					GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	cpu->map_base = ioremap(res.start, sz);
	if (!cpu->map_base)
		return -ENOMEM;

	for (i = 0; i < n_words; i++) {
		l1_writel(~intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_set(intc, i));
		l1_writel(intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_clr(intc, i));
		cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
	}

	parent_irq = irq_of_parse_and_map(dn, idx);
	if (!parent_irq) {
		pr_err("failed to map parent interrupt %d\n", idx);
		return -EINVAL;
	}

	if (of_property_read_bool(dn, "brcm,irq-can-wake"))
		enable_irq_wake(parent_irq);

	irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
					 intc);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
 * used because the struct chip_type suspend/resume hooks are not called
 * unless chip_type is hooked onto a generic_chip. Since this driver does
 * not use generic_chip, we need to manually hook our resume/suspend to
 * syscore_ops.
 */
static LIST_HEAD(bcm7038_l1_intcs_list);
static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);

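/*
 * Across suspend, only wakeup sources (wake_mask) and forwarded interrupts
 * (irq_fwd_mask) are left unmasked in the boot CPU's register block; on
 * resume the boot CPU's mask_cache is written back to the hardware.
 */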
static int bcm7038_l1_suspend(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;
	u32 val;

	/* Wakeup interrupt should only come from the boot cpu */
#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
			l1_writel(~val,
				intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(val,
				intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}

	return 0;
}

static void bcm7038_l1_resume(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;

#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
				intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
				intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}
}

static struct syscore_ops bcm7038_l1_syscore_ops = {
	.suspend	= bcm7038_l1_suspend,
	.resume		= bcm7038_l1_resume,
};

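/* Record which hwirqs may wake the system; applied by bcm7038_l1_suspend(). */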
static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	raw_spin_lock_irqsave(&intc->lock, flags);
	if (on)
		intc->wake_mask[word] |= mask;
	else
		intc->wake_mask[word] &= ~mask;
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	return 0;
}
#endif

static struct irq_chip bcm7038_l1_irq_chip = {
	.name			= "bcm7038-l1",
	.irq_mask		= bcm7038_l1_mask,
	.irq_unmask		= bcm7038_l1_unmask,
#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
	.irq_set_affinity	= bcm7038_l1_set_affinity,
#endif
#ifdef CONFIG_PM_SLEEP
	.irq_set_wake		= bcm7038_l1_set_wake,
#endif
};

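/*
 * Domain map callback: hwirqs whose bit is set in brcm,int-fwd-mask are not
 * available to Linux and are rejected with -EPERM; everything else is set up
 * as a level interrupt with a single target CPU.
 */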
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw_irq)
{
	struct bcm7038_l1_chip *intc = d->host_data;
	u32 mask = BIT(hw_irq % IRQS_PER_WORD);
	u32 word = hw_irq / IRQS_PER_WORD;

	if (intc->irq_fwd_mask[word] & mask)
		return -EPERM;

	irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	irqd_set_single_target(irq_get_irq_data(virq));
	return 0;
}

static const struct irq_domain_ops bcm7038_l1_domain_ops = {
	.xlate			= irq_domain_xlate_onecell,
	.map			= bcm7038_l1_map,
};

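/*
 * Controller setup: one register window and one parent interrupt per
 * possible CPU are probed via bcm7038_l1_init_one(), then a linear IRQ
 * domain covering IRQS_PER_WORD * n_words lines is registered. A purely
 * illustrative device tree node (labels, addresses, sizes and parent IRQ
 * numbers below are made-up example values, not taken from a real board)
 * could look like:
 *
 *	periph_intc: interrupt-controller@1041a400 {
 *		compatible = "brcm,bcm7038-l1-intc";
 *		reg = <0x1041a400 0x30>, <0x1041a600 0x30>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupt-parent = <&cpu_intc>;
 *		interrupts = <2>, <3>;
 *	};
 *
 * i.e. two CPUs, each with a 0x30-byte block (three 32-bit words, 96
 * interrupt lines) and its own line to the parent controller.
 */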
static int __init bcm7038_l1_of_init(struct device_node *dn,
				     struct device_node *parent)
{
	struct bcm7038_l1_chip *intc;
	int idx, ret;

	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
	if (!intc)
		return -ENOMEM;

	raw_spin_lock_init(&intc->lock);
	for_each_possible_cpu(idx) {
		ret = bcm7038_l1_init_one(dn, idx, intc);
		if (ret < 0) {
			if (idx)
				break;
			pr_err("failed to remap intc L1 registers\n");
			goto out_free;
		}
	}

	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
					     &bcm7038_l1_domain_ops,
					     intc);
	if (!intc->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

#ifdef CONFIG_PM_SLEEP
	/* Add bcm7038_l1_chip into a list */
	raw_spin_lock(&bcm7038_l1_intcs_lock);
	list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
	raw_spin_unlock(&bcm7038_l1_intcs_lock);

	if (list_is_singular(&bcm7038_l1_intcs_list))
		register_syscore_ops(&bcm7038_l1_syscore_ops);
#endif

	pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
		dn, IRQS_PER_WORD * intc->n_words);

	return 0;

out_unmap:
	for_each_possible_cpu(idx) {
		struct bcm7038_l1_cpu *cpu = intc->cpus[idx];

		if (cpu) {
			if (cpu->map_base)
				iounmap(cpu->map_base);
			kfree(cpu);
		}
	}
out_free:
	kfree(intc);
	return ret;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1)
IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_of_init)
IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1)
MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller");
MODULE_LICENSE("GPL v2");