// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM GIC v2m MSI(-X) support
 * Support for Message Signaled Interrupts for systems that
 * implement ARM Generic Interrupt Controller: GICv2m.
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 *          Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 *          Brandon Anderson <brandon.anderson@amd.com>
 */

#define pr_fmt(fmt) "GICv2m: " fmt

#include <linux/acpi.h>
#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>

/*
 * MSI_TYPER:
 * [31:26]	Reserved
 * [25:16]	lowest SPI assigned to MSI
 * [15:10]	Reserved
 * [9:0]	Number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER			0x008
#define V2M_MSI_TYPER_BASE_SHIFT	16
#define V2M_MSI_TYPER_BASE_MASK		0x3FF
#define V2M_MSI_TYPER_NUM_MASK		0x3FF
#define V2M_MSI_SETSPI_NS		0x040
#define V2M_MIN_SPI			32
#define V2M_MAX_SPI			1019
#define V2M_MSI_IIDR			0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)	\
	(((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)	((x) & V2M_MSI_TYPER_NUM_MASK)

/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR		0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
#define GICV2M_GRAVITON_ADDRESS_ONLY	0x00000002

static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);

struct v2m_data {
	struct list_head entry;
	struct fwnode_handle *fwnode;
	struct resource res;	/* GICv2m resource */
	void __iomem *base;	/* GICv2m virt address */
	u32 spi_start;		/* The SPI number at which MSIs start */
	u32 nr_spis;		/* The number of SPIs for MSIs */
	u32 spi_offset;		/* offset to be subtracted from SPI number */
	unsigned long *bm;	/* MSI vector bitmap */
	u32 flags;		/* v2m flags for specific implementation */
};

static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip gicv2m_msi_irq_chip = {
	.name			= "MSI",
	.irq_mask		= gicv2m_mask_msi_irq,
	.irq_unmask		= gicv2m_unmask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};

static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &gicv2m_msi_irq_chip,
};

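/*
 * Doorbell address for a given SPI. A standard v2m frame is written at its
 * MSI_SETSPI_NS register and decodes the target SPI from the MSI payload.
 * The Graviton variant instead decodes the SPI from the write address
 * itself (one 8-byte slot per SPI, relative to SPI 32) and ignores the
 * payload.
 */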
static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
{
	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		return v2m->res.start | ((hwirq - 32) << 3);
	else
		return v2m->res.start + V2M_MSI_SETSPI_NS;
}

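/*
 * Compose the MSI message for an allocated SPI: the address is the frame's
 * doorbell (see gicv2m_get_msi_addr()) and the data is normally the absolute
 * SPI number. Frames flagged GICV2M_NEEDS_SPI_OFFSET (X-Gene, NS2) expect a
 * value relative to spi_offset instead, and address-only frames (Graviton)
 * carry no data at all.
 */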
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);

	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		msg->data = 0;
	else
		msg->data = data->hwirq;
	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_offset;

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};

static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

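	/*
	 * Build a firmware spec for the parent GIC. A DT parent uses the
	 * standard three-cell GIC binding: GIC_SPI (0), the SPI number
	 * relative to 32, and the trigger type. An ACPI/fwnode parent takes
	 * the absolute hwirq plus the trigger type.
	 */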
	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}

static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
			       int nr_irqs)
{
	spin_lock(&v2m_lock);
	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
			      get_count_order(nr_irqs));
	spin_unlock(&v2m_lock);
}

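/*
 * Allocate nr_irqs consecutive SPIs: scan the registered frames for a free,
 * suitably aligned block in the per-frame bitmap, prepare the IOMMU mapping
 * for the doorbell, then wire each SPI through the parent GIC domain.
 */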
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct v2m_data *v2m = NULL, *tmp;
	int hwirq, offset, i, err = 0;

	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
						 get_count_order(nr_irqs));
		if (offset >= 0) {
			v2m = tmp;
			break;
		}
	}
	spin_unlock(&v2m_lock);

	if (!v2m)
		return -ENOSPC;

	hwirq = v2m->spi_start + offset;

	err = iommu_dma_prepare_msi(info->desc,
				    gicv2m_get_msi_addr(v2m, hwirq));
	if (err)
		return err;

	for (i = 0; i < nr_irqs; i++) {
		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			goto fail;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &gicv2m_irq_chip, v2m);
	}

	return 0;

fail:
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
	return err;
}

static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc			= gicv2m_irq_domain_alloc,
	.free			= gicv2m_irq_domain_free,
};

static bool is_msi_spi_valid(u32 base, u32 num)
{
	if (base < V2M_MIN_SPI) {
		pr_err("Invalid MSI base SPI (base:%u)\n", base);
		return false;
	}

	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
		return false;
	}

	return true;
}

static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name			= "pMSI",
};

static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &gicv2m_pmsi_ops,
	.chip	= &gicv2m_pmsi_irq_chip,
};

static void gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		kfree(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}

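/*
 * Build the irq domain hierarchy on top of the parent GIC: a nexus domain
 * that owns the SPI allocation, with a PCI/MSI domain and a platform MSI
 * domain stacked above it. All registered frames share the domains created
 * around the first frame on the list.
 */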
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_tree(v2m->fwnode,
					      &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
				  u32 spi_start, u32 nr_spis,
				  struct resource *res, u32 flags)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m) {
		pr_err("Failed to allocate struct v2m_data.\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&v2m->entry);
	v2m->fwnode = fwnode;
	v2m->flags = flags;

	memcpy(&v2m->res, res, sizeof(struct resource));

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

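	/*
	 * Use the SPI range supplied by firmware (DT properties or the ACPI
	 * MADT entry) when present; otherwise probe it from MSI_TYPER.
	 */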
	if (spi_start && nr_spis) {
		v2m->spi_start = spi_start;
		v2m->nr_spis = nr_spis;
	} else {
		u32 typer;

		/* Graviton should always have explicit spi_start/nr_spis */
		if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
			ret = -EINVAL;
			goto err_iounmap;
		}
		typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	/*
	 * APM X-Gene GICv2m implementation has an erratum where
	 * the MSI data needs to be the offset from the spi_start
	 * in order to trigger the correct MSI interrupt. This is
	 * different from the standard GICv2m implementation where
	 * the MSI data is the absolute value within the range from
	 * spi_start to (spi_start + num_spis).
	 *
	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI
	 * data is 'spi_number - 32'.
	 *
	 * Reading the MSI_IIDR register fails on the Graviton implementation.
	 */
	if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
		switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
		case XGENE_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = v2m->spi_start;
			break;
		case BCM_NS2_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = 32;
			break;
		}
	}
	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
			  GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);

	pr_info("range%pR, SPI[%d:%d]\n", res,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}

static struct of_device_id gicv2m_device_id[] = {
	{ .compatible = "arm,gic-v2m-frame", },
	{},
};

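/*
 * DT probe path: walk the "arm,gic-v2m-frame" children of the GIC node that
 * are marked as msi-controller, optionally taking the SPI range from the
 * "arm,msi-base-spi"/"arm,msi-num-spis" properties instead of MSI_TYPER.
 */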
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		if (!of_find_property(child, "msi-controller", NULL))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
				      &res, 0);
		if (ret) {
			of_node_put(child);
			break;
		}
	}

	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}

#ifdef CONFIG_ACPI
static int acpi_num_msi;

static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
	struct v2m_data *data;

	if (WARN_ON(acpi_num_msi <= 0))
		return NULL;

	/* We only return the fwnode of the first MSI frame. */
	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!data)
		return NULL;

	return data->fwnode;
}

static bool acpi_check_amazon_graviton_quirks(void)
{
	static struct acpi_table_madt *madt;
	acpi_status status;
	bool rc = false;

#define ACPI_AMZN_OEM_ID		"AMAZON"

	status = acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt);

	if (ACPI_FAILURE(status) || !madt)
		return rc;
	rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
	acpi_put_table((struct acpi_table_header *)madt);

	return rc;
}

static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;
	u32 flags = 0;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

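	/*
	 * Amazon Graviton systems expose an address-only frame: the region
	 * is 8K rather than 4K, the MSI_TYPER and MSI_IIDR registers are not
	 * readable, and multi-MSI cannot be supported because the payload is
	 * ignored.
	 */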
	if (acpi_check_amazon_graviton_quirks()) {
		pr_info("applying Amazon Graviton quirk\n");
		res.end = res.start + SZ_8K - 1;
		flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
		gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
	}

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	fwnode = irq_domain_alloc_fwnode(&res.start);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}

static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
					     acpi_parse_madt_msi, 0);

	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
#else /* CONFIG_ACPI */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
#endif /* CONFIG_ACPI */

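/*
 * Entry point: pick the DT or ACPI probe path based on the type of the
 * parent GIC's fwnode.
 */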
int __init gicv2m_init(struct fwnode_handle *parent_handle,
		       struct irq_domain *parent)
{
	if (is_of_node(parent_handle))
		return gicv2m_of_init(parent_handle, parent);

	return gicv2m_acpi_init(parent);
}