v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * ARM GIC v2m MSI(-X) support
  4 * Support for Message Signaled Interrupts for systems that
  5 * implement ARM Generic Interrupt Controller: GICv2m.
  6 *
  7 * Copyright (C) 2014 Advanced Micro Devices, Inc.
  8 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
  9 *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 10 *	    Brandon Anderson <brandon.anderson@amd.com>
 11 */
 12
 13#define pr_fmt(fmt) "GICv2m: " fmt
 14
 15#include <linux/acpi.h>
 16#include <linux/dma-iommu.h>
 17#include <linux/irq.h>
 18#include <linux/irqdomain.h>
 19#include <linux/kernel.h>
 20#include <linux/pci.h>
 21#include <linux/msi.h>
 22#include <linux/of_address.h>
 23#include <linux/of_pci.h>
 24#include <linux/slab.h>
 25#include <linux/spinlock.h>
 26#include <linux/irqchip/arm-gic.h>
 27
 28/*
 29* MSI_TYPER:
 30*     [31:26] Reserved
 31*     [25:16] lowest SPI assigned to MSI
 32*     [15:10] Reserved
 33*     [9:0]   Number of SPIs assigned to MSI
 34*/
 35#define V2M_MSI_TYPER		       0x008
 36#define V2M_MSI_TYPER_BASE_SHIFT       16
 37#define V2M_MSI_TYPER_BASE_MASK	       0x3FF
 38#define V2M_MSI_TYPER_NUM_MASK	       0x3FF
 39#define V2M_MSI_SETSPI_NS	       0x040
 40#define V2M_MIN_SPI		       32
 41#define V2M_MAX_SPI		       1019
 42#define V2M_MSI_IIDR		       0xFCC
 43
 44#define V2M_MSI_TYPER_BASE_SPI(x)      \
 45	       (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
 46
 47#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)
 48
 49/* APM X-Gene with GICv2m MSI_IIDR register value */
 50#define XGENE_GICV2M_MSI_IIDR		0x06000170
 51
 52/* Broadcom NS2 GICv2m MSI_IIDR register value */
 53#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f
 54
 55/* List of flags for specific v2m implementation */
 56#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
 57#define GICV2M_GRAVITON_ADDRESS_ONLY	0x00000002
 58
 59static LIST_HEAD(v2m_nodes);
 60static DEFINE_SPINLOCK(v2m_lock);
 61
 62struct v2m_data {
 63	struct list_head entry;
 64	struct fwnode_handle *fwnode;
 65	struct resource res;	/* GICv2m resource */
 66	void __iomem *base;	/* GICv2m virt address */
 67	u32 spi_start;		/* The SPI number that MSIs start */
 68	u32 nr_spis;		/* The number of SPIs for MSIs */
 69	u32 spi_offset;		/* offset to be subtracted from SPI number */
 70	unsigned long *bm;	/* MSI vector bitmap */
 71	u32 flags;		/* v2m flags for specific implementation */
 72};
 73
 74static void gicv2m_mask_msi_irq(struct irq_data *d)
 75{
 76	pci_msi_mask_irq(d);
 77	irq_chip_mask_parent(d);
 78}
 79
 80static void gicv2m_unmask_msi_irq(struct irq_data *d)
 81{
 82	pci_msi_unmask_irq(d);
 83	irq_chip_unmask_parent(d);
 84}
 85
 86static struct irq_chip gicv2m_msi_irq_chip = {
 87	.name			= "MSI",
 88	.irq_mask		= gicv2m_mask_msi_irq,
 89	.irq_unmask		= gicv2m_unmask_msi_irq,
 90	.irq_eoi		= irq_chip_eoi_parent,
 91	.irq_write_msi_msg	= pci_msi_domain_write_msg,
 92};
 93
 94static struct msi_domain_info gicv2m_msi_domain_info = {
 95	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 96		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
 97	.chip	= &gicv2m_msi_irq_chip,
 98};
 99
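/*
 * Pick the doorbell address for a given SPI: standard v2m frames expose a
 * single MSI_SETSPI_NS doorbell, while Graviton "address only" frames
 * encode the target SPI in the address itself, one 8-byte slot per SPI
 * starting at SPI 32.
 */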
100static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
101{
102	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
103		return v2m->res.start | ((hwirq - 32) << 3);
104	else
105		return v2m->res.start + V2M_MSI_SETSPI_NS;
106}
107
108static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
109{
110	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
111	phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
112
113	msg->address_hi = upper_32_bits(addr);
114	msg->address_lo = lower_32_bits(addr);
115
116	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
117		msg->data = 0;
118	else
119		msg->data = data->hwirq;
120	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
121		msg->data -= v2m->spi_offset;
122
123	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
124}
125
126static struct irq_chip gicv2m_irq_chip = {
127	.name			= "GICv2m",
128	.irq_mask		= irq_chip_mask_parent,
129	.irq_unmask		= irq_chip_unmask_parent,
130	.irq_eoi		= irq_chip_eoi_parent,
131	.irq_set_affinity	= irq_chip_set_affinity_parent,
132	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
133};
134
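/*
 * Allocate the SPI in the parent GIC domain. A DT parent takes the
 * three-cell GIC binding (0 = SPI, number relative to SPI 32); an
 * ACPI/fwnode parent takes a two-cell hwirq/trigger pair. Either way the
 * line is then forced to edge-rising, as MSIs are edge-triggered.
 */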
135static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
136				       unsigned int virq,
137				       irq_hw_number_t hwirq)
138{
139	struct irq_fwspec fwspec;
140	struct irq_data *d;
141	int err;
142
143	if (is_of_node(domain->parent->fwnode)) {
144		fwspec.fwnode = domain->parent->fwnode;
145		fwspec.param_count = 3;
146		fwspec.param[0] = 0;
147		fwspec.param[1] = hwirq - 32;
148		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
149	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
150		fwspec.fwnode = domain->parent->fwnode;
151		fwspec.param_count = 2;
152		fwspec.param[0] = hwirq;
153		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
154	} else {
155		return -EINVAL;
156	}
157
158	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
159	if (err)
160		return err;
161
162	/* Configure the interrupt line to be edge */
163	d = irq_domain_get_irq_data(domain->parent, virq);
164	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
165	return 0;
166}
167
168static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
169			       int nr_irqs)
170{
171	spin_lock(&v2m_lock);
172	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
173			      get_count_order(nr_irqs));
174	spin_unlock(&v2m_lock);
175}
176
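/*
 * Allocate nr_irqs MSIs: scan the registered frames for a free,
 * power-of-two aligned block of SPIs, prepare the doorbell mapping for a
 * possible IOMMU, then wire each SPI into the parent GIC domain.
 */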
177static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
178				   unsigned int nr_irqs, void *args)
179{
180	msi_alloc_info_t *info = args;
181	struct v2m_data *v2m = NULL, *tmp;
182	int hwirq, offset, i, err = 0;
183
184	spin_lock(&v2m_lock);
185	list_for_each_entry(tmp, &v2m_nodes, entry) {
186		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
187						 get_count_order(nr_irqs));
188		if (offset >= 0) {
189			v2m = tmp;
190			break;
191		}
192	}
193	spin_unlock(&v2m_lock);
194
195	if (!v2m)
196		return -ENOSPC;
197
198	hwirq = v2m->spi_start + offset;
199
200	err = iommu_dma_prepare_msi(info->desc,
201				    gicv2m_get_msi_addr(v2m, hwirq));
202	if (err)
203		return err;
204
205	for (i = 0; i < nr_irqs; i++) {
206		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
207		if (err)
208			goto fail;
209
210		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
211					      &gicv2m_irq_chip, v2m);
212	}
213
214	return 0;
215
216fail:
217	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
218	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
219	return err;
220}
221
222static void gicv2m_irq_domain_free(struct irq_domain *domain,
223				   unsigned int virq, unsigned int nr_irqs)
224{
225	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
226	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
227
228	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
229	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
230}
231
232static const struct irq_domain_ops gicv2m_domain_ops = {
233	.alloc			= gicv2m_irq_domain_alloc,
234	.free			= gicv2m_irq_domain_free,
235};
236
237static bool is_msi_spi_valid(u32 base, u32 num)
238{
239	if (base < V2M_MIN_SPI) {
240		pr_err("Invalid MSI base SPI (base:%u)\n", base);
241		return false;
242	}
243
244	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
 245		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
246		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
247		return false;
248	}
249
250	return true;
251}
252
253static struct irq_chip gicv2m_pmsi_irq_chip = {
254	.name			= "pMSI",
255};
256
257static struct msi_domain_ops gicv2m_pmsi_ops = {
258};
259
260static struct msi_domain_info gicv2m_pmsi_domain_info = {
261	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
262	.ops	= &gicv2m_pmsi_ops,
263	.chip	= &gicv2m_pmsi_irq_chip,
264};
265
266static void gicv2m_teardown(void)
267{
268	struct v2m_data *v2m, *tmp;
269
270	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
271		list_del(&v2m->entry);
272		kfree(v2m->bm);
273		iounmap(v2m->base);
274		of_node_put(to_of_node(v2m->fwnode));
275		if (is_fwnode_irqchip(v2m->fwnode))
276			irq_domain_free_fwnode(v2m->fwnode);
277		kfree(v2m);
278	}
279}
280
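/*
 * Build the irq_domain stack on top of the parent GIC domain: a v2m
 * "nexus" domain owning the SPI allocation, with PCI-MSI and platform-MSI
 * domains stacked above it. All frames share this single stack, keyed off
 * the first frame's fwnode.
 */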
281static int gicv2m_allocate_domains(struct irq_domain *parent)
282{
283	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
284	struct v2m_data *v2m;
285
286	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
287	if (!v2m)
288		return 0;
289
290	inner_domain = irq_domain_create_tree(v2m->fwnode,
291					      &gicv2m_domain_ops, v2m);
292	if (!inner_domain) {
293		pr_err("Failed to create GICv2m domain\n");
294		return -ENOMEM;
295	}
296
297	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
298	inner_domain->parent = parent;
299	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
300					       &gicv2m_msi_domain_info,
301					       inner_domain);
302	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
303						     &gicv2m_pmsi_domain_info,
304						     inner_domain);
305	if (!pci_domain || !plat_domain) {
306		pr_err("Failed to create MSI domains\n");
307		if (plat_domain)
308			irq_domain_remove(plat_domain);
309		if (pci_domain)
310			irq_domain_remove(pci_domain);
311		irq_domain_remove(inner_domain);
312		return -ENOMEM;
313	}
314
315	return 0;
316}
317
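/*
 * Register one frame: map its MMIO window, take the SPI range from the
 * firmware-provided override or from MSI_TYPER, apply the IIDR-based
 * implementation quirks and allocate the SPI bitmap.
 */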
318static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
319				  u32 spi_start, u32 nr_spis,
320				  struct resource *res, u32 flags)
321{
322	int ret;
323	struct v2m_data *v2m;
324
325	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
326	if (!v2m) {
327		pr_err("Failed to allocate struct v2m_data.\n");
328		return -ENOMEM;
329	}
330
331	INIT_LIST_HEAD(&v2m->entry);
332	v2m->fwnode = fwnode;
333	v2m->flags = flags;
334
335	memcpy(&v2m->res, res, sizeof(struct resource));
336
337	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
338	if (!v2m->base) {
339		pr_err("Failed to map GICv2m resource\n");
340		ret = -ENOMEM;
341		goto err_free_v2m;
342	}
343
344	if (spi_start && nr_spis) {
345		v2m->spi_start = spi_start;
346		v2m->nr_spis = nr_spis;
347	} else {
348		u32 typer;
349
350		/* Graviton should always have explicit spi_start/nr_spis */
351		if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
352			ret = -EINVAL;
353			goto err_iounmap;
354		}
355		typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
356
357		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
358		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
359	}
360
361	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
362		ret = -EINVAL;
363		goto err_iounmap;
364	}
365
366	/*
367	 * APM X-Gene GICv2m implementation has an erratum where
368	 * the MSI data needs to be the offset from the spi_start
369	 * in order to trigger the correct MSI interrupt. This is
370	 * different from the standard GICv2m implementation where
371	 * the MSI data is the absolute value within the range from
372	 * spi_start to (spi_start + num_spis).
373	 *
 374	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
375	 * is 'spi_number - 32'
376	 *
377	 * Reading that register fails on the Graviton implementation
378	 */
379	if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
380		switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
381		case XGENE_GICV2M_MSI_IIDR:
382			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
383			v2m->spi_offset = v2m->spi_start;
384			break;
385		case BCM_NS2_GICV2M_MSI_IIDR:
386			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
387			v2m->spi_offset = 32;
388			break;
389		}
390	}
391	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
392			  GFP_KERNEL);
393	if (!v2m->bm) {
394		ret = -ENOMEM;
395		goto err_iounmap;
396	}
397
398	list_add_tail(&v2m->entry, &v2m_nodes);
399
400	pr_info("range%pR, SPI[%d:%d]\n", res,
401		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
402	return 0;
403
404err_iounmap:
405	iounmap(v2m->base);
406err_free_v2m:
407	kfree(v2m);
408	return ret;
409}
410
411static struct of_device_id gicv2m_device_id[] = {
412	{	.compatible	= "arm,gic-v2m-frame",	},
413	{},
414};
415
416static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
417				 struct irq_domain *parent)
418{
419	int ret = 0;
420	struct device_node *node = to_of_node(parent_handle);
421	struct device_node *child;
422
423	for (child = of_find_matching_node(node, gicv2m_device_id); child;
424	     child = of_find_matching_node(child, gicv2m_device_id)) {
425		u32 spi_start = 0, nr_spis = 0;
426		struct resource res;
427
428		if (!of_find_property(child, "msi-controller", NULL))
429			continue;
430
431		ret = of_address_to_resource(child, 0, &res);
432		if (ret) {
433			pr_err("Failed to allocate v2m resource.\n");
434			break;
435		}
436
437		if (!of_property_read_u32(child, "arm,msi-base-spi",
438					  &spi_start) &&
439		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
440			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
441				spi_start, nr_spis);
442
443		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
444				      &res, 0);
445		if (ret) {
446			of_node_put(child);
447			break;
448		}
449	}
450
451	if (!ret)
452		ret = gicv2m_allocate_domains(parent);
453	if (ret)
454		gicv2m_teardown();
455	return ret;
456}
457
458#ifdef CONFIG_ACPI
459static int acpi_num_msi;
460
461static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
462{
463	struct v2m_data *data;
464
465	if (WARN_ON(acpi_num_msi <= 0))
466		return NULL;
467
468	/* We only return the fwnode of the first MSI frame. */
469	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
470	if (!data)
471		return NULL;
472
473	return data->fwnode;
474}
475
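/* Graviton frames are identified purely by the MADT OEM ID ("AMAZON"). */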
476static bool acpi_check_amazon_graviton_quirks(void)
477{
478	static struct acpi_table_madt *madt;
479	acpi_status status;
480	bool rc = false;
481
482#define ACPI_AMZN_OEM_ID		"AMAZON"
483
484	status = acpi_get_table(ACPI_SIG_MADT, 0,
485				(struct acpi_table_header **)&madt);
486
487	if (ACPI_FAILURE(status) || !madt)
488		return rc;
489	rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
490	acpi_put_table((struct acpi_table_header *)madt);
491
492	return rc;
493}
494
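/*
 * One MADT GENERIC_MSI_FRAME entry describes one frame: a 4K register
 * window (8K on Graviton, which also loses multi-MSI support), optionally
 * overriding the TYPER-provided SPI range.
 */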
495static int __init
496acpi_parse_madt_msi(union acpi_subtable_headers *header,
497		    const unsigned long end)
498{
499	int ret;
500	struct resource res;
501	u32 spi_start = 0, nr_spis = 0;
502	struct acpi_madt_generic_msi_frame *m;
503	struct fwnode_handle *fwnode;
504	u32 flags = 0;
505
506	m = (struct acpi_madt_generic_msi_frame *)header;
507	if (BAD_MADT_ENTRY(m, end))
508		return -EINVAL;
509
510	res.start = m->base_address;
511	res.end = m->base_address + SZ_4K - 1;
512	res.flags = IORESOURCE_MEM;
513
514	if (acpi_check_amazon_graviton_quirks()) {
515		pr_info("applying Amazon Graviton quirk\n");
516		res.end = res.start + SZ_8K - 1;
517		flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
518		gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
519	}
520
521	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
522		spi_start = m->spi_base;
523		nr_spis = m->spi_count;
524
525		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
526			spi_start, nr_spis);
527	}
528
529	fwnode = irq_domain_alloc_fwnode(&res.start);
530	if (!fwnode) {
531		pr_err("Unable to allocate GICv2m domain token\n");
532		return -EINVAL;
533	}
534
535	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
536	if (ret)
537		irq_domain_free_fwnode(fwnode);
538
539	return ret;
540}
541
542static int __init gicv2m_acpi_init(struct irq_domain *parent)
543{
544	int ret;
545
546	if (acpi_num_msi > 0)
547		return 0;
548
549	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
550				      acpi_parse_madt_msi, 0);
551
552	if (acpi_num_msi <= 0)
553		goto err_out;
554
555	ret = gicv2m_allocate_domains(parent);
556	if (ret)
557		goto err_out;
558
559	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);
560
561	return 0;
562
563err_out:
564	gicv2m_teardown();
565	return -EINVAL;
566}
567#else /* CONFIG_ACPI */
568static int __init gicv2m_acpi_init(struct irq_domain *parent)
569{
570	return -EINVAL;
571}
572#endif /* CONFIG_ACPI */
573
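/*
 * Entry point, called by the GIC driver once the parent domain is set up:
 * probe v2m frames from DT or ACPI depending on the parent fwnode type.
 */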
574int __init gicv2m_init(struct fwnode_handle *parent_handle,
575		       struct irq_domain *parent)
576{
577	if (is_of_node(parent_handle))
578		return gicv2m_of_init(parent_handle, parent);
579
580	return gicv2m_acpi_init(parent);
581}
v4.17
 
  1/*
  2 * ARM GIC v2m MSI(-X) support
  3 * Support for Message Signaled Interrupts for systems that
  4 * implement ARM Generic Interrupt Controller: GICv2m.
  5 *
  6 * Copyright (C) 2014 Advanced Micro Devices, Inc.
  7 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
  8 *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
  9 *	    Brandon Anderson <brandon.anderson@amd.com>
 10 *
 11 * This program is free software; you can redistribute it and/or modify it
 12 * under the terms of the GNU General Public License version 2 as published
 13 * by the Free Software Foundation.
 14 */
 15
 16#define pr_fmt(fmt) "GICv2m: " fmt
 17
 18#include <linux/acpi.h>
 19#include <linux/dma-iommu.h>
 20#include <linux/irq.h>
 21#include <linux/irqdomain.h>
 22#include <linux/kernel.h>
 23#include <linux/msi.h>
 24#include <linux/of_address.h>
 25#include <linux/of_pci.h>
 26#include <linux/slab.h>
 27#include <linux/spinlock.h>
 28#include <linux/irqchip/arm-gic.h>
 29
 30/*
 31* MSI_TYPER:
 32*     [31:26] Reserved
 33*     [25:16] lowest SPI assigned to MSI
 34*     [15:10] Reserved
 35*     [9:0]   Number of SPIs assigned to MSI
 36*/
 37#define V2M_MSI_TYPER		       0x008
 38#define V2M_MSI_TYPER_BASE_SHIFT       16
 39#define V2M_MSI_TYPER_BASE_MASK	       0x3FF
 40#define V2M_MSI_TYPER_NUM_MASK	       0x3FF
 41#define V2M_MSI_SETSPI_NS	       0x040
 42#define V2M_MIN_SPI		       32
 43#define V2M_MAX_SPI		       1019
 44#define V2M_MSI_IIDR		       0xFCC
 45
 46#define V2M_MSI_TYPER_BASE_SPI(x)      \
 47	       (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
 48
 49#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)
 50
 51/* APM X-Gene with GICv2m MSI_IIDR register value */
 52#define XGENE_GICV2M_MSI_IIDR		0x06000170
 53
 54/* Broadcom NS2 GICv2m MSI_IIDR register value */
 55#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f
 56
 57/* List of flags for specific v2m implementation */
 58#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
 59
 60static LIST_HEAD(v2m_nodes);
 61static DEFINE_SPINLOCK(v2m_lock);
 62
 63struct v2m_data {
 64	struct list_head entry;
 65	struct fwnode_handle *fwnode;
 66	struct resource res;	/* GICv2m resource */
 67	void __iomem *base;	/* GICv2m virt address */
 68	u32 spi_start;		/* The SPI number that MSIs start */
 69	u32 nr_spis;		/* The number of SPIs for MSIs */
 70	u32 spi_offset;		/* offset to be subtracted from SPI number */
 71	unsigned long *bm;	/* MSI vector bitmap */
 72	u32 flags;		/* v2m flags for specific implementation */
 73};
 74
 75static void gicv2m_mask_msi_irq(struct irq_data *d)
 76{
 77	pci_msi_mask_irq(d);
 78	irq_chip_mask_parent(d);
 79}
 80
 81static void gicv2m_unmask_msi_irq(struct irq_data *d)
 82{
 83	pci_msi_unmask_irq(d);
 84	irq_chip_unmask_parent(d);
 85}
 86
 87static struct irq_chip gicv2m_msi_irq_chip = {
 88	.name			= "MSI",
 89	.irq_mask		= gicv2m_mask_msi_irq,
 90	.irq_unmask		= gicv2m_unmask_msi_irq,
 91	.irq_eoi		= irq_chip_eoi_parent,
 92	.irq_write_msi_msg	= pci_msi_domain_write_msg,
 93};
 94
 95static struct msi_domain_info gicv2m_msi_domain_info = {
 96	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 97		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
 98	.chip	= &gicv2m_msi_irq_chip,
 99};
100
101static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
102{
103	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
104	phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
105
106	msg->address_hi = upper_32_bits(addr);
107	msg->address_lo = lower_32_bits(addr);
108	msg->data = data->hwirq;
109
110	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
111		msg->data -= v2m->spi_offset;
112
113	iommu_dma_map_msi_msg(data->irq, msg);
114}
115
116static struct irq_chip gicv2m_irq_chip = {
117	.name			= "GICv2m",
118	.irq_mask		= irq_chip_mask_parent,
119	.irq_unmask		= irq_chip_unmask_parent,
120	.irq_eoi		= irq_chip_eoi_parent,
121	.irq_set_affinity	= irq_chip_set_affinity_parent,
122	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
123};
124
125static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
126				       unsigned int virq,
127				       irq_hw_number_t hwirq)
128{
129	struct irq_fwspec fwspec;
130	struct irq_data *d;
131	int err;
132
133	if (is_of_node(domain->parent->fwnode)) {
134		fwspec.fwnode = domain->parent->fwnode;
135		fwspec.param_count = 3;
136		fwspec.param[0] = 0;
137		fwspec.param[1] = hwirq - 32;
138		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
139	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
140		fwspec.fwnode = domain->parent->fwnode;
141		fwspec.param_count = 2;
142		fwspec.param[0] = hwirq;
143		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
144	} else {
145		return -EINVAL;
146	}
147
148	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
149	if (err)
150		return err;
151
152	/* Configure the interrupt line to be edge */
153	d = irq_domain_get_irq_data(domain->parent, virq);
154	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
155	return 0;
156}
157
158static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
159			       int nr_irqs)
160{
161	spin_lock(&v2m_lock);
162	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
163			      get_count_order(nr_irqs));
164	spin_unlock(&v2m_lock);
165}
166
167static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
168				   unsigned int nr_irqs, void *args)
169{
170	struct v2m_data *v2m = NULL, *tmp;
171	int hwirq, offset, i, err = 0;
172
173	spin_lock(&v2m_lock);
174	list_for_each_entry(tmp, &v2m_nodes, entry) {
175		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
176						 get_count_order(nr_irqs));
177		if (offset >= 0) {
178			v2m = tmp;
179			break;
180		}
181	}
182	spin_unlock(&v2m_lock);
183
184	if (!v2m)
185		return -ENOSPC;
186
187	hwirq = v2m->spi_start + offset;
188
189	for (i = 0; i < nr_irqs; i++) {
190		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
191		if (err)
192			goto fail;
193
194		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
195					      &gicv2m_irq_chip, v2m);
196	}
197
198	return 0;
199
200fail:
201	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
202	gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
203	return err;
204}
205
206static void gicv2m_irq_domain_free(struct irq_domain *domain,
207				   unsigned int virq, unsigned int nr_irqs)
208{
209	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
210	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
211
212	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
213	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
214}
215
216static const struct irq_domain_ops gicv2m_domain_ops = {
217	.alloc			= gicv2m_irq_domain_alloc,
218	.free			= gicv2m_irq_domain_free,
219};
220
221static bool is_msi_spi_valid(u32 base, u32 num)
222{
223	if (base < V2M_MIN_SPI) {
224		pr_err("Invalid MSI base SPI (base:%u)\n", base);
225		return false;
226	}
227
228	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
 229		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
230		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
231		return false;
232	}
233
234	return true;
235}
236
237static struct irq_chip gicv2m_pmsi_irq_chip = {
238	.name			= "pMSI",
239};
240
241static struct msi_domain_ops gicv2m_pmsi_ops = {
242};
243
244static struct msi_domain_info gicv2m_pmsi_domain_info = {
245	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
246	.ops	= &gicv2m_pmsi_ops,
247	.chip	= &gicv2m_pmsi_irq_chip,
248};
249
250static void gicv2m_teardown(void)
251{
252	struct v2m_data *v2m, *tmp;
253
254	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
255		list_del(&v2m->entry);
256		kfree(v2m->bm);
257		iounmap(v2m->base);
258		of_node_put(to_of_node(v2m->fwnode));
259		if (is_fwnode_irqchip(v2m->fwnode))
260			irq_domain_free_fwnode(v2m->fwnode);
261		kfree(v2m);
262	}
263}
264
265static int gicv2m_allocate_domains(struct irq_domain *parent)
266{
267	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
268	struct v2m_data *v2m;
269
270	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
271	if (!v2m)
272		return 0;
273
274	inner_domain = irq_domain_create_tree(v2m->fwnode,
275					      &gicv2m_domain_ops, v2m);
276	if (!inner_domain) {
277		pr_err("Failed to create GICv2m domain\n");
278		return -ENOMEM;
279	}
280
281	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
282	inner_domain->parent = parent;
283	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
284					       &gicv2m_msi_domain_info,
285					       inner_domain);
286	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
287						     &gicv2m_pmsi_domain_info,
288						     inner_domain);
289	if (!pci_domain || !plat_domain) {
290		pr_err("Failed to create MSI domains\n");
291		if (plat_domain)
292			irq_domain_remove(plat_domain);
293		if (pci_domain)
294			irq_domain_remove(pci_domain);
295		irq_domain_remove(inner_domain);
296		return -ENOMEM;
297	}
298
299	return 0;
300}
301
302static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
303				  u32 spi_start, u32 nr_spis,
304				  struct resource *res)
305{
306	int ret;
307	struct v2m_data *v2m;
308
309	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
310	if (!v2m) {
311		pr_err("Failed to allocate struct v2m_data.\n");
312		return -ENOMEM;
313	}
314
315	INIT_LIST_HEAD(&v2m->entry);
316	v2m->fwnode = fwnode;
317
318	memcpy(&v2m->res, res, sizeof(struct resource));
319
320	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
321	if (!v2m->base) {
322		pr_err("Failed to map GICv2m resource\n");
323		ret = -ENOMEM;
324		goto err_free_v2m;
325	}
326
327	if (spi_start && nr_spis) {
328		v2m->spi_start = spi_start;
329		v2m->nr_spis = nr_spis;
330	} else {
331		u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
332
333		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
334		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
335	}
336
337	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
338		ret = -EINVAL;
339		goto err_iounmap;
340	}
341
342	/*
343	 * APM X-Gene GICv2m implementation has an erratum where
344	 * the MSI data needs to be the offset from the spi_start
345	 * in order to trigger the correct MSI interrupt. This is
346	 * different from the standard GICv2m implementation where
347	 * the MSI data is the absolute value within the range from
348	 * spi_start to (spi_start + num_spis).
349	 *
 350	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
351	 * is 'spi_number - 32'
352	 */
353	switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
354	case XGENE_GICV2M_MSI_IIDR:
355		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
356		v2m->spi_offset = v2m->spi_start;
357		break;
358	case BCM_NS2_GICV2M_MSI_IIDR:
359		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
360		v2m->spi_offset = 32;
361		break;
362	}
363
364	v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
365			  GFP_KERNEL);
366	if (!v2m->bm) {
367		ret = -ENOMEM;
368		goto err_iounmap;
369	}
370
371	list_add_tail(&v2m->entry, &v2m_nodes);
372
373	pr_info("range%pR, SPI[%d:%d]\n", res,
374		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
375	return 0;
376
377err_iounmap:
378	iounmap(v2m->base);
379err_free_v2m:
380	kfree(v2m);
381	return ret;
382}
383
384static struct of_device_id gicv2m_device_id[] = {
385	{	.compatible	= "arm,gic-v2m-frame",	},
386	{},
387};
388
389static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
390				 struct irq_domain *parent)
391{
392	int ret = 0;
393	struct device_node *node = to_of_node(parent_handle);
394	struct device_node *child;
395
396	for (child = of_find_matching_node(node, gicv2m_device_id); child;
397	     child = of_find_matching_node(child, gicv2m_device_id)) {
398		u32 spi_start = 0, nr_spis = 0;
399		struct resource res;
400
401		if (!of_find_property(child, "msi-controller", NULL))
402			continue;
403
404		ret = of_address_to_resource(child, 0, &res);
405		if (ret) {
406			pr_err("Failed to allocate v2m resource.\n");
407			break;
408		}
409
410		if (!of_property_read_u32(child, "arm,msi-base-spi",
411					  &spi_start) &&
412		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
413			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
414				spi_start, nr_spis);
415
416		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
417		if (ret) {
418			of_node_put(child);
419			break;
420		}
421	}
422
423	if (!ret)
424		ret = gicv2m_allocate_domains(parent);
425	if (ret)
426		gicv2m_teardown();
427	return ret;
428}
429
430#ifdef CONFIG_ACPI
431static int acpi_num_msi;
432
433static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
434{
435	struct v2m_data *data;
436
437	if (WARN_ON(acpi_num_msi <= 0))
438		return NULL;
439
440	/* We only return the fwnode of the first MSI frame. */
441	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
442	if (!data)
443		return NULL;
444
445	return data->fwnode;
446}
447
448static int __init
449acpi_parse_madt_msi(struct acpi_subtable_header *header,
450		    const unsigned long end)
451{
452	int ret;
453	struct resource res;
454	u32 spi_start = 0, nr_spis = 0;
455	struct acpi_madt_generic_msi_frame *m;
456	struct fwnode_handle *fwnode;
457
458	m = (struct acpi_madt_generic_msi_frame *)header;
459	if (BAD_MADT_ENTRY(m, end))
460		return -EINVAL;
461
462	res.start = m->base_address;
463	res.end = m->base_address + SZ_4K - 1;
464	res.flags = IORESOURCE_MEM;
465
466	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
467		spi_start = m->spi_base;
468		nr_spis = m->spi_count;
469
470		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
471			spi_start, nr_spis);
472	}
473
474	fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
475	if (!fwnode) {
476		pr_err("Unable to allocate GICv2m domain token\n");
477		return -EINVAL;
478	}
479
480	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
481	if (ret)
482		irq_domain_free_fwnode(fwnode);
483
484	return ret;
485}
486
487static int __init gicv2m_acpi_init(struct irq_domain *parent)
488{
489	int ret;
490
491	if (acpi_num_msi > 0)
492		return 0;
493
494	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
495				      acpi_parse_madt_msi, 0);
496
497	if (acpi_num_msi <= 0)
498		goto err_out;
499
500	ret = gicv2m_allocate_domains(parent);
501	if (ret)
502		goto err_out;
503
504	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);
505
506	return 0;
507
508err_out:
509	gicv2m_teardown();
510	return -EINVAL;
511}
512#else /* CONFIG_ACPI */
513static int __init gicv2m_acpi_init(struct irq_domain *parent)
514{
515	return -EINVAL;
516}
517#endif /* CONFIG_ACPI */
518
519int __init gicv2m_init(struct fwnode_handle *parent_handle,
520		       struct irq_domain *parent)
521{
522	if (is_of_node(parent_handle))
523		return gicv2m_of_init(parent_handle, parent);
524
525	return gicv2m_acpi_init(parent);
526}