// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
 * Loongson Local IO Interrupt Controller support
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/irqchip/chained_irq.h>

#ifdef CONFIG_MIPS
#include <loongson.h>
#else
#include <asm/loongson.h>
#endif

#define LIOINTC_CHIP_IRQ	32
#define LIOINTC_NUM_PARENT	4
#define LIOINTC_NUM_CORES	4

#define LIOINTC_INTC_CHIP_START	0x20

#define LIOINTC_REG_INTC_STATUS		(LIOINTC_INTC_CHIP_START + 0x20)
#define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE		(LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
/*
 * The LIOINTC_REG_INTC_POL register is only valid on the Loongson-2K series;
 * on the Loongson-3 series writes to it behave as a no-op.
 */
#define LIOINTC_REG_INTC_POL	(LIOINTC_INTC_CHIP_START + 0x10)
#define LIOINTC_REG_INTC_EDGE	(LIOINTC_INTC_CHIP_START + 0x14)

#define LIOINTC_SHIFT_INTx	4

#define LIOINTC_ERRATA_IRQ	10

#if defined(CONFIG_MIPS)
#define liointc_core_id get_ebase_cpunum()
#else
#define liointc_core_id get_csr_cpuid()
#endif

struct liointc_handler_data {
	struct liointc_priv	*priv;
	u32			parent_int_map;
};

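/*
 * Per-controller state.  core_isr[] holds each core's view of the interrupt
 * status register, map_cache[] mirrors the per-IRQ route bytes written back
 * on resume (low nibble: target core mask, high nibble: parent INTx line),
 * and int_pol/int_edge preserve the trigger setup across suspend/resume.
 */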
struct liointc_priv {
	struct irq_chip_generic		*gc;
	struct liointc_handler_data	handler[LIOINTC_NUM_PARENT];
	void __iomem			*core_isr[LIOINTC_NUM_CORES];
	u8				map_cache[LIOINTC_CHIP_IRQ];
	u32				int_pol;
	u32				int_edge;
	bool				has_lpc_irq_errata;
};

struct fwnode_handle *liointc_handle;

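/*
 * Chained handler for a parent INTx line: read the status word seen by the
 * local core and demultiplex every pending bit into the generic-chip domain.
 * On parts with the LPC IRQ erratum, an all-zero status read is attributed to
 * the LPC interrupt (if it is mapped and unmasked) instead of being treated
 * as spurious.
 */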
static void liointc_chained_handle_irq(struct irq_desc *desc)
{
	struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_chip_generic *gc = handler->priv->gc;
	int core = liointc_core_id % LIOINTC_NUM_CORES;
	u32 pending;

	chained_irq_enter(chip, desc);

	pending = readl(handler->priv->core_isr[core]);

	if (!pending) {
		/* Always blame LPC IRQ if we have that bug */
		if (handler->priv->has_lpc_irq_errata &&
				(handler->parent_int_map & gc->mask_cache &
				BIT(LIOINTC_ERRATA_IRQ)))
			pending = BIT(LIOINTC_ERRATA_IRQ);
		else
			spurious_interrupt();
	}

	while (pending) {
		int bit = __ffs(pending);

		generic_handle_domain_irq(gc->domain, bit);
		pending &= ~BIT(bit);
	}

	chained_irq_exit(chip, desc);
}

static void liointc_set_bit(struct irq_chip_generic *gc,
				unsigned int offset,
				u32 mask, bool set)
{
	if (set)
		writel(readl(gc->reg_base + offset) | mask,
				gc->reg_base + offset);
	else
		writel(readl(gc->reg_base + offset) & ~mask,
				gc->reg_base + offset);
}

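/*
 * Trigger configuration uses two per-bit registers: EDGE selects level (0)
 * versus edge (1) triggering, POL selects the active polarity for the
 * chosen mode.
 */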
static int liointc_set_type(struct irq_data *data, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	u32 mask = data->mask;
	unsigned long flags;

	irq_gc_lock_irqsave(gc, flags);
	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
		break;
	case IRQ_TYPE_EDGE_RISING:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
		break;
	case IRQ_TYPE_EDGE_FALLING:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
		break;
	default:
		irq_gc_unlock_irqrestore(gc, flags);
		return -EINVAL;
	}
	irq_gc_unlock_irqrestore(gc, flags);

	irqd_set_trigger_type(data, type);
	return 0;
}

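/*
 * Register state is not preserved across S3/S4: save the polarity and edge
 * registers on suspend, then replay the route map, trigger setup and enable
 * mask (taken from the generic chip's mask cache) on resume.
 */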
static void liointc_suspend(struct irq_chip_generic *gc)
{
	struct liointc_priv *priv = gc->private;

	priv->int_pol = readl(gc->reg_base + LIOINTC_REG_INTC_POL);
	priv->int_edge = readl(gc->reg_base + LIOINTC_REG_INTC_EDGE);
}

static void liointc_resume(struct irq_chip_generic *gc)
{
	struct liointc_priv *priv = gc->private;
	unsigned long flags;
	int i;

	irq_gc_lock_irqsave(gc, flags);
	/* Disable all at first */
	writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
	/* Restore map cache */
	for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
		writeb(priv->map_cache[i], gc->reg_base + i);
	writel(priv->int_pol, gc->reg_base + LIOINTC_REG_INTC_POL);
	writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
	/* Restore mask cache */
	writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
	irq_gc_unlock_irqrestore(gc, flags);
}

static int parent_irq[LIOINTC_NUM_PARENT];
static u32 parent_int_map[LIOINTC_NUM_PARENT];
static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};

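/*
 * On ACPI systems the firmware refers to sources by GSI number, so this
 * xlate callback converts a GSI into a controller-local hwirq by subtracting
 * the base of the CPU interrupt GSI range.
 */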
static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;

	if (intsize > 1)
		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	else
		*out_type = IRQ_TYPE_NONE;

	return 0;
}

static const struct irq_domain_ops acpi_irq_gc_ops = {
	.map	= irq_map_generic_chip,
	.unmap	= irq_unmap_generic_chip,
	.xlate	= liointc_domain_xlate,
};

static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
		struct fwnode_handle *domain_handle, struct device_node *node)
{
	int i, err;
	void __iomem *base;
	struct irq_chip_type *ct;
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	struct liointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	base = ioremap(addr, size);
	if (!base)
		goto out_free_priv;

	for (i = 0; i < LIOINTC_NUM_CORES; i++)
		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;

	for (i = 0; i < LIOINTC_NUM_PARENT; i++)
		priv->handler[i].parent_int_map = parent_int_map[i];

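	/*
	 * Revision 2 (liointc-2.0) describes one ISR copy per core via
	 * separate "isrN" reg entries; earlier revisions fall back to the
	 * shared status register mapped above.
	 */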
	if (revision > 1) {
		for (i = 0; i < LIOINTC_NUM_CORES; i++) {
			int index = of_property_match_string(node,
					"reg-names", core_reg_names[i]);

			if (index < 0)
				continue;

			priv->core_isr[i] = of_iomap(node, index);
		}

		if (!priv->core_isr[0])
			goto out_iounmap;
	}

	/* Setup IRQ domain */
	if (!acpi_disabled)
		domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
					&acpi_irq_gc_ops, priv);
	else
		domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
					&irq_generic_chip_ops, priv);
	if (!domain) {
		pr_err("loongson-liointc: cannot add IRQ domain\n");
		goto out_iounmap;
	}

	err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
					(node ? node->full_name : "LIOINTC"),
					handle_level_irq, 0, IRQ_NOPROBE, 0);
	if (err) {
		pr_err("loongson-liointc: unable to register IRQ domain\n");
		goto out_free_domain;
	}

	/* Disable all IRQs */
	writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE);
	/* Set to level triggered */
	writel(0x0, base + LIOINTC_REG_INTC_EDGE);

	/* Generate parent INT part of map cache */
	for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
		u32 pending = priv->handler[i].parent_int_map;

		while (pending) {
			int bit = __ffs(pending);

			priv->map_cache[bit] = BIT(i) << LIOINTC_SHIFT_INTx;
			pending &= ~BIT(bit);
		}
	}

	for (i = 0; i < LIOINTC_CHIP_IRQ; i++) {
		/* Generate core part of map cache */
		priv->map_cache[i] |= BIT(loongson_sysconf.boot_cpu_id);
		writeb(priv->map_cache[i], base + i);
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->private = priv;
	gc->reg_base = base;
	gc->domain = domain;
	gc->suspend = liointc_suspend;
	gc->resume = liointc_resume;

	ct = gc->chip_types;
	ct->regs.enable = LIOINTC_REG_INTC_ENABLE;
	ct->regs.disable = LIOINTC_REG_INTC_DISABLE;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
	ct->chip.irq_set_type = liointc_set_type;
	ct->chip.flags = IRQCHIP_SKIP_SET_WAKE;

	gc->mask_cache = 0;
	priv->gc = gc;

	for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
		if (parent_irq[i] <= 0)
			continue;

		priv->handler[i].priv = priv;
		irq_set_chained_handler_and_data(parent_irq[i],
				liointc_chained_handle_irq, &priv->handler[i]);
	}

	liointc_handle = domain_handle;
	return 0;

out_free_domain:
	irq_domain_remove(domain);
out_iounmap:
	iounmap(base);
out_free_priv:
	kfree(priv);

	return -EINVAL;
}

#ifdef CONFIG_OF

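/*
 * Device-tree probe path.  liointc-1.0/1.0a use a single "reg" entry, while
 * liointc-2.0 names its main register block (and per-core ISRs) through
 * "reg-names".  The "loongson,parent_int_map" property lists, per parent
 * INTx output, the bitmap of controller inputs routed to it.
 */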
static int __init liointc_of_init(struct device_node *node,
				struct device_node *parent)
{
	bool have_parent = false;
	int sz, i, index, revision, err = 0;
	struct resource res;

	if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
		index = 0;
		revision = 1;
	} else {
		index = of_property_match_string(node, "reg-names", "main");
		revision = 2;
	}

	if (of_address_to_resource(node, index, &res))
		return -EINVAL;

	for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
		parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
		if (parent_irq[i] > 0)
			have_parent = true;
	}
	if (!have_parent)
		return -ENODEV;

	sz = of_property_read_variable_u32_array(node,
						"loongson,parent_int_map",
						&parent_int_map[0],
						LIOINTC_NUM_PARENT,
						LIOINTC_NUM_PARENT);
	if (sz < 4) {
		pr_err("loongson-liointc: No parent_int_map\n");
		return -ENODEV;
	}

	err = liointc_init(res.start, resource_size(&res),
			revision, of_node_to_fwnode(node), node);
	if (err < 0)
		return err;

	return 0;
}

IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);
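
/*
 * For reference, a minimal sketch of a matching device-tree node (addresses,
 * parent interrupt numbers and the parent_int_map values are illustrative
 * and depend on the SoC and board):
 *
 *	liointc0: interrupt-controller@3ff01400 {
 *		compatible = "loongson,liointc-1.0";
 *		reg = <0x3ff01400 0x64>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-parent = <&cpuintc>;
 *		interrupts = <2>, <3>;
 *		interrupt-names = "int0", "int1";
 *		loongson,parent_int_map = <0xf0ffffff>, <0x0f000000>,
 *					  <0x00000000>, <0x00000000>;
 *	};
 */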

#endif

#ifdef CONFIG_ACPI
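/*
 * ACPI boot path: the caller of liointc_acpi_init() has already parsed the
 * MADT LIO PIC entry; once this domain is registered, walk the MADT again
 * for HT PIC entries and bring up the cascaded HT vector controller.
 */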
static int __init htintc_parse_madt(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct acpi_madt_ht_pic *htintc_entry = (struct acpi_madt_ht_pic *)header;
	struct irq_domain *parent = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY);

	return htvec_acpi_init(parent, htintc_entry);
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_HT_PIC, htintc_parse_madt, 0);
	if (r < 0)
		return r;

	return 0;
}

int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
{
	int ret;
	struct fwnode_handle *domain_handle;

	parent_int_map[0] = acpi_liointc->cascade_map[0];
	parent_int_map[1] = acpi_liointc->cascade_map[1];

	parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
	parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);

	domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		return -ENOMEM;
	}

	ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
			   1, domain_handle, NULL);
	if (ret == 0)
		ret = acpi_cascade_irqdomain_init();
	else
		irq_domain_free_fwnode(domain_handle);

	return ret;
}
#endif