// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/syscore_ops.h>

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

static int nr_pics;

struct eiointc_priv {
	u32 node;
	nodemask_t node_map;
	cpumask_t cpuspan_map;
	struct fwnode_handle *domain_handle;
	struct irq_domain *eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

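/*
 * Turn on the extended I/O interrupt feature by setting the EXT_IOI
 * enable bit in the per-core MISC_FUNC IOCSR, so that external device
 * interrupts are delivered through the EIOINTC.
 */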
static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

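/* Each EIO node covers CORES_PER_EIO_NODE consecutive physical cores. */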
static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}

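/*
 * Program the route byte of one vector on every online EIO node that
 * this controller spans. Each vector owns one byte of the ROUTE
 * registers (four bytes per 32-bit register); as composed below, the
 * low nibble of a route byte is a core bitmap within the destination
 * node and the high nibble is the destination node id.
 */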
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

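/*
 * Retarget a vector to the first online CPU in the requested mask that
 * this controller can reach. The vector is masked while its route byte
 * is rewritten; affinity_lock serialises concurrent updates of the
 * shared ENABLE and ROUTE registers.
 */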
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
			0x0, priv->node * CORES_PER_EIO_NODE);

	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
			0x0, priv->node * CORES_PER_EIO_NODE);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}

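/* Find which controller instance (if any) covers the given EIO node. */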
static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

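/*
 * Per-CPU bring-up hook, also reused on resume. Only the first core of
 * each EIO node does the real work: enable the extended I/O feature,
 * give every vector a default node map and IP mapping (IP[1 + index]),
 * route all vectors to core 0 of the controller's node (the boot core
 * for controller 0), then write all-ones to the ENABLE and BOUNCE
 * registers.
 */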
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node); /* must be signed: eiointc_index() returns -1 on failure */

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

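/*
 * Chained handler for the cascade interrupt: scan the four 64-bit ISR
 * registers, acknowledge the pending bits by writing them back, and
 * demultiplex each set bit into the EIOINTC irqdomain.
 */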
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < VEC_REG_COUNT; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

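/*
 * Vectors are acknowledged in eiointc_irq_dispatch() by writing the
 * ISR back, and masking is only needed around route updates, so these
 * per-IRQ callbacks are left empty.
 */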
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};

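/* Allocate a contiguous range of vectors, all handled as edge IRQs. */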
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data; /* set at domain creation */

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
					priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

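/*
 * Record (and later look up) the EIOINTC domain that serves a given
 * node in the global pch_group/msi_group tables, so the downstream
 * PCH PIC and PCH MSI drivers can find their parent domain.
 */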
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	if (cpu_has_flatmode)
		node = cpu_to_node(node * CORES_PER_EIO_NODE);

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}
	return NULL;
}

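/*
 * Syscore hooks: nothing to save on suspend; on resume, rerun the
 * boot-CPU router init and replay the recorded affinity of every
 * mapped vector to restore the routing registers.
 */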
static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	int i, j;
	struct irq_desc *desc;
	struct irq_data *irq_data;

	eiointc_router_init(0);

	for (i = 0; i < nr_pics; i++) {
		for (j = 0; j < VEC_COUNT; j++) {
			desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
			if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
				raw_spin_lock(&desc->lock);
				irq_data = &desc->irq_data;
				eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
				raw_spin_unlock(&desc->lock);
			}
		}
	}
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend	= eiointc_suspend,
	.resume		= eiointc_resume,
};

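/*
 * MADT callbacks for the downstream PCH PIC and PCH MSI controllers.
 * The PCH PIC's node is taken from bits [47:44] of its base address;
 * both controllers are attached to the parent domain recorded by
 * acpi_set_vec_parent() above.
 */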
static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return -EINVAL;
}

static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return -EINVAL;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

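/*
 * Probe one EIOINTC instance from its MADT entry: build the node and
 * CPU span masks, create the linear irqdomain, initialise the router
 * on the boot CPU, chain the cascade interrupt, register the syscore
 * and hotplug hooks, then bring up the cascaded PCH irqdomains.
 */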
int __init eiointc_acpi_init(struct irq_domain *parent,
				     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int i, ret, parent_irq;
	unsigned long node_map;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->node = acpi_eiointc->node;
	node_map = acpi_eiointc->node_map ? : -1ULL;

	for_each_possible_cpu(i) {
		if (node_map & (1ULL << cpu_to_eio_node(i))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
		}
	}

	/* Setup IRQ domain */
	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
							&eiointc_domain_ops, priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-eiointc: cannot add IRQ domain\n");
		goto out_free_handle;
	}

	eiointc_priv[nr_pics++] = priv;

	eiointc_router_init(0);

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	register_syscore_ops(&eiointc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
				  "irqchip/loongarch/intc:starting",
				  eiointc_router_init, NULL);

	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
	ret = acpi_cascade_irqdomain_init();

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}