v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * ARM GIC v2m MSI(-X) support
  4 * Support for Message Signaled Interrupts for systems that
  5 * implement ARM Generic Interrupt Controller: GICv2m.
  6 *
  7 * Copyright (C) 2014 Advanced Micro Devices, Inc.
  8 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
  9 *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 10 *	    Brandon Anderson <brandon.anderson@amd.com>
 11 */
 12
 13#define pr_fmt(fmt) "GICv2m: " fmt
 14
 15#include <linux/acpi.h>
 16#include <linux/dma-iommu.h>
 17#include <linux/irq.h>
 18#include <linux/irqdomain.h>
 19#include <linux/kernel.h>
 20#include <linux/pci.h>
 21#include <linux/msi.h>
 22#include <linux/of_address.h>
 23#include <linux/of_pci.h>
 24#include <linux/slab.h>
 25#include <linux/spinlock.h>
 26#include <linux/irqchip/arm-gic.h>
 27
 28/*
 29* MSI_TYPER:
 30*     [31:26] Reserved
 31*     [25:16] lowest SPI assigned to MSI
 32*     [15:10] Reserved
  33*     [9:0]   Number of SPIs assigned to MSI
 34*/
 35#define V2M_MSI_TYPER		       0x008
 36#define V2M_MSI_TYPER_BASE_SHIFT       16
 37#define V2M_MSI_TYPER_BASE_MASK	       0x3FF
 38#define V2M_MSI_TYPER_NUM_MASK	       0x3FF
 39#define V2M_MSI_SETSPI_NS	       0x040
 40#define V2M_MIN_SPI		       32
 41#define V2M_MAX_SPI		       1019
 42#define V2M_MSI_IIDR		       0xFCC
 43
 44#define V2M_MSI_TYPER_BASE_SPI(x)      \
 45	       (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
 46
 47#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)
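/*
 * Illustrative decode (editorial example, not part of the original
 * source): for a hypothetical MSI_TYPER value of 0x00400020, the
 * macros above give V2M_MSI_TYPER_BASE_SPI() = 0x40 = 64 (bits [25:16])
 * and V2M_MSI_TYPER_NUM_SPI() = 0x20 = 32 (bits [9:0]), i.e. a frame
 * serving SPIs 64..95.
 */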
 48
 49/* APM X-Gene with GICv2m MSI_IIDR register value */
 50#define XGENE_GICV2M_MSI_IIDR		0x06000170
 51
 52/* Broadcom NS2 GICv2m MSI_IIDR register value */
 53#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f
 54
 55/* List of flags for specific v2m implementation */
 56#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
 57#define GICV2M_GRAVITON_ADDRESS_ONLY	0x00000002
 58
 59static LIST_HEAD(v2m_nodes);
 60static DEFINE_SPINLOCK(v2m_lock);
 61
 62struct v2m_data {
 63	struct list_head entry;
 64	struct fwnode_handle *fwnode;
 65	struct resource res;	/* GICv2m resource */
 66	void __iomem *base;	/* GICv2m virt address */
 67	u32 spi_start;		/* The SPI number that MSIs start */
 68	u32 nr_spis;		/* The number of SPIs for MSIs */
 69	u32 spi_offset;		/* offset to be subtracted from SPI number */
 70	unsigned long *bm;	/* MSI vector bitmap */
 71	u32 flags;		/* v2m flags for specific implementation */
 72};
 73
 74static void gicv2m_mask_msi_irq(struct irq_data *d)
 75{
 76	pci_msi_mask_irq(d);
 77	irq_chip_mask_parent(d);
 78}
 79
 80static void gicv2m_unmask_msi_irq(struct irq_data *d)
 81{
 82	pci_msi_unmask_irq(d);
 83	irq_chip_unmask_parent(d);
 84}
 85
 86static struct irq_chip gicv2m_msi_irq_chip = {
 87	.name			= "MSI",
 88	.irq_mask		= gicv2m_mask_msi_irq,
 89	.irq_unmask		= gicv2m_unmask_msi_irq,
 90	.irq_eoi		= irq_chip_eoi_parent,
 91	.irq_write_msi_msg	= pci_msi_domain_write_msg,
 92};
 93
 94static struct msi_domain_info gicv2m_msi_domain_info = {
 95	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 96		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
 97	.chip	= &gicv2m_msi_irq_chip,
 98};
 99
100static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
101{
102	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
103		return v2m->res.start | ((hwirq - 32) << 3);
104	else
105		return v2m->res.start + V2M_MSI_SETSPI_NS;
106}
107
108static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
109{
110	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
111	phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
112
113	msg->address_hi = upper_32_bits(addr);
114	msg->address_lo = lower_32_bits(addr);
115
116	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
117		msg->data = 0;
118	else
119		msg->data = data->hwirq;
120	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
121		msg->data -= v2m->spi_offset;
122
123	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
124}
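/*
 * Editorial worked example (frame addresses and hwirq values are
 * hypothetical): for a standard v2m frame whose resource starts at
 * 0x08020000, hwirq 70 with no SPI-offset quirk yields the message
 * address 0x08020040 (frame base + V2M_MSI_SETSPI_NS) and data 70.
 * For a GICV2M_GRAVITON_ADDRESS_ONLY frame at 0x10000000 and hwirq
 * 100, the SPI is encoded in the address instead:
 * 0x10000000 | ((100 - 32) << 3) = 0x10000220, with data 0.
 */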
125
126static struct irq_chip gicv2m_irq_chip = {
127	.name			= "GICv2m",
128	.irq_mask		= irq_chip_mask_parent,
129	.irq_unmask		= irq_chip_unmask_parent,
130	.irq_eoi		= irq_chip_eoi_parent,
131	.irq_set_affinity	= irq_chip_set_affinity_parent,
132	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
133};
134
135static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
136				       unsigned int virq,
137				       irq_hw_number_t hwirq)
138{
139	struct irq_fwspec fwspec;
140	struct irq_data *d;
141	int err;
142
143	if (is_of_node(domain->parent->fwnode)) {
144		fwspec.fwnode = domain->parent->fwnode;
145		fwspec.param_count = 3;
146		fwspec.param[0] = 0;
147		fwspec.param[1] = hwirq - 32;
148		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
149	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
150		fwspec.fwnode = domain->parent->fwnode;
151		fwspec.param_count = 2;
152		fwspec.param[0] = hwirq;
153		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
154	} else {
155		return -EINVAL;
156	}
157
158	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
159	if (err)
160		return err;
161
162	/* Configure the interrupt line to be edge */
163	d = irq_domain_get_irq_data(domain->parent, virq);
164	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
165	return 0;
166}
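/*
 * Editorial note with a hypothetical value: in the DT case the parent
 * GIC takes the usual three-cell specifier, where cell 0 selects an
 * SPI and cell 1 is the SPI number minus 32, so a v2m hwirq of 96 is
 * passed up as <0 64 IRQ_TYPE_EDGE_RISING>; a firmware (ACPI) irqchip
 * parent receives the raw hwirq plus the trigger type.
 */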
167
168static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
169			       int nr_irqs)
170{
171	spin_lock(&v2m_lock);
172	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
173			      get_count_order(nr_irqs));
174	spin_unlock(&v2m_lock);
175}
176
177static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
178				   unsigned int nr_irqs, void *args)
179{
180	msi_alloc_info_t *info = args;
181	struct v2m_data *v2m = NULL, *tmp;
182	int hwirq, offset, i, err = 0;
183
184	spin_lock(&v2m_lock);
185	list_for_each_entry(tmp, &v2m_nodes, entry) {
186		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
187						 get_count_order(nr_irqs));
188		if (offset >= 0) {
189			v2m = tmp;
190			break;
191		}
192	}
193	spin_unlock(&v2m_lock);
194
195	if (!v2m)
196		return -ENOSPC;
197
198	hwirq = v2m->spi_start + offset;
199
200	err = iommu_dma_prepare_msi(info->desc,
201				    gicv2m_get_msi_addr(v2m, hwirq));
202	if (err)
203		return err;
204
205	for (i = 0; i < nr_irqs; i++) {
206		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
207		if (err)
208			goto fail;
209
210		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
211					      &gicv2m_irq_chip, v2m);
212	}
213
214	return 0;
215
216fail:
217	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
218	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
219	return err;
220}
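/*
 * Editorial note (example counts are hypothetical): multi-MSI requests
 * are served from the per-frame bitmap as naturally aligned
 * power-of-two regions, so nr_irqs = 4 uses get_count_order(4) = 2 and
 * claims 4 aligned bits, and a request for 3 interrupts also claims a
 * 4-bit region.
 */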
221
222static void gicv2m_irq_domain_free(struct irq_domain *domain,
223				   unsigned int virq, unsigned int nr_irqs)
224{
225	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
226	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
227
228	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
229	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
230}
231
232static const struct irq_domain_ops gicv2m_domain_ops = {
233	.alloc			= gicv2m_irq_domain_alloc,
234	.free			= gicv2m_irq_domain_free,
235};
236
237static bool is_msi_spi_valid(u32 base, u32 num)
238{
239	if (base < V2M_MIN_SPI) {
240		pr_err("Invalid MSI base SPI (base:%u)\n", base);
241		return false;
242	}
243
244	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
245		pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
246		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
247		return false;
248	}
249
250	return true;
251}
252
253static struct irq_chip gicv2m_pmsi_irq_chip = {
254	.name			= "pMSI",
255};
256
257static struct msi_domain_ops gicv2m_pmsi_ops = {
258};
259
260static struct msi_domain_info gicv2m_pmsi_domain_info = {
261	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
262	.ops	= &gicv2m_pmsi_ops,
263	.chip	= &gicv2m_pmsi_irq_chip,
264};
265
266static void gicv2m_teardown(void)
267{
268	struct v2m_data *v2m, *tmp;
269
270	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
271		list_del(&v2m->entry);
272		kfree(v2m->bm);
273		iounmap(v2m->base);
274		of_node_put(to_of_node(v2m->fwnode));
275		if (is_fwnode_irqchip(v2m->fwnode))
276			irq_domain_free_fwnode(v2m->fwnode);
277		kfree(v2m);
278	}
279}
280
281static int gicv2m_allocate_domains(struct irq_domain *parent)
282{
283	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
284	struct v2m_data *v2m;
285
286	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
287	if (!v2m)
288		return 0;
289
290	inner_domain = irq_domain_create_tree(v2m->fwnode,
291					      &gicv2m_domain_ops, v2m);
292	if (!inner_domain) {
293		pr_err("Failed to create GICv2m domain\n");
294		return -ENOMEM;
295	}
296
297	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
298	inner_domain->parent = parent;
299	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
300					       &gicv2m_msi_domain_info,
301					       inner_domain);
302	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
303						     &gicv2m_pmsi_domain_info,
304						     inner_domain);
305	if (!pci_domain || !plat_domain) {
306		pr_err("Failed to create MSI domains\n");
307		if (plat_domain)
308			irq_domain_remove(plat_domain);
309		if (pci_domain)
310			irq_domain_remove(pci_domain);
311		irq_domain_remove(inner_domain);
312		return -ENOMEM;
313	}
314
315	return 0;
316}
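/*
 * Editorial sketch of the domain stacking set up above (names refer to
 * this file; the diagram itself is an illustration):
 *
 *	PCI/MSI domain      platform MSI domain
 *	        \              /
 *	    GICv2m nexus domain (gicv2m_domain_ops)
 *	                |
 *	    parent GIC domain (@parent)
 *
 * Endpoint drivers allocate from the two MSI domains on top; the nexus
 * domain picks a free SPI and the parent GIC delivers it.
 */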
317
318static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
319				  u32 spi_start, u32 nr_spis,
320				  struct resource *res, u32 flags)
321{
322	int ret;
323	struct v2m_data *v2m;
324
325	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
326	if (!v2m) {
327		pr_err("Failed to allocate struct v2m_data.\n");
328		return -ENOMEM;
329	}
330
331	INIT_LIST_HEAD(&v2m->entry);
332	v2m->fwnode = fwnode;
333	v2m->flags = flags;
334
335	memcpy(&v2m->res, res, sizeof(struct resource));
336
337	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
338	if (!v2m->base) {
339		pr_err("Failed to map GICv2m resource\n");
340		ret = -ENOMEM;
341		goto err_free_v2m;
342	}
343
344	if (spi_start && nr_spis) {
345		v2m->spi_start = spi_start;
346		v2m->nr_spis = nr_spis;
347	} else {
348		u32 typer;
349
350		/* Graviton should always have explicit spi_start/nr_spis */
351		if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
352			ret = -EINVAL;
353			goto err_iounmap;
354		}
355		typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
356
357		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
358		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
359	}
360
361	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
362		ret = -EINVAL;
363		goto err_iounmap;
364	}
365
366	/*
367	 * APM X-Gene GICv2m implementation has an erratum where
368	 * the MSI data needs to be the offset from the spi_start
369	 * in order to trigger the correct MSI interrupt. This is
370	 * different from the standard GICv2m implementation where
371	 * the MSI data is the absolute value within the range from
372	 * spi_start to (spi_start + num_spis).
373	 *
374	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
375	 * is 'spi_number - 32'
376	 *
377	 * Reading that register fails on the Graviton implementation
378	 */
379	if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
380		switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
381		case XGENE_GICV2M_MSI_IIDR:
382			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
383			v2m->spi_offset = v2m->spi_start;
384			break;
385		case BCM_NS2_GICV2M_MSI_IIDR:
386			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
387			v2m->spi_offset = 32;
388			break;
389		}
390	}
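	/*
	 * Editorial worked example (hypothetical frame): with
	 * spi_start = 64 and hwirq = 70, a standard frame programs MSI
	 * data 70, an X-Gene frame (spi_offset = spi_start) programs
	 * 70 - 64 = 6, and a Broadcom NS2 frame (spi_offset = 32)
	 * programs 70 - 32 = 38; the subtraction itself is applied in
	 * gicv2m_compose_msi_msg().
	 */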
391	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
392			  GFP_KERNEL);
393	if (!v2m->bm) {
394		ret = -ENOMEM;
395		goto err_iounmap;
396	}
397
398	list_add_tail(&v2m->entry, &v2m_nodes);
399
400	pr_info("range%pR, SPI[%d:%d]\n", res,
401		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
402	return 0;
403
404err_iounmap:
405	iounmap(v2m->base);
406err_free_v2m:
407	kfree(v2m);
408	return ret;
409}
410
411static struct of_device_id gicv2m_device_id[] = {
412	{	.compatible	= "arm,gic-v2m-frame",	},
413	{},
414};
415
416static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
417				 struct irq_domain *parent)
418{
419	int ret = 0;
420	struct device_node *node = to_of_node(parent_handle);
421	struct device_node *child;
422
423	for (child = of_find_matching_node(node, gicv2m_device_id); child;
424	     child = of_find_matching_node(child, gicv2m_device_id)) {
425		u32 spi_start = 0, nr_spis = 0;
426		struct resource res;
427
428		if (!of_find_property(child, "msi-controller", NULL))
429			continue;
430
431		ret = of_address_to_resource(child, 0, &res);
432		if (ret) {
433			pr_err("Failed to allocate v2m resource.\n");
434			break;
435		}
436
437		if (!of_property_read_u32(child, "arm,msi-base-spi",
438					  &spi_start) &&
439		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
440			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
441				spi_start, nr_spis);
442
443		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
444				      &res, 0);
445		if (ret) {
446			of_node_put(child);
447			break;
448		}
449	}
450
451	if (!ret)
452		ret = gicv2m_allocate_domains(parent);
453	if (ret)
454		gicv2m_teardown();
455	return ret;
456}
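/*
 * Illustrative (hypothetical) device-tree fragment of the kind matched
 * by gicv2m_of_init() above, placed as a child of the GIC node; the
 * node name, addresses and SPI values are editorial examples only:
 *
 *	v2m: v2m@8020000 {
 *		compatible = "arm,gic-v2m-frame";
 *		msi-controller;
 *		reg = <0x0 0x8020000 0x0 0x1000>;
 *		arm,msi-base-spi = <64>;
 *		arm,msi-num-spis = <32>;
 *	};
 */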
457
458#ifdef CONFIG_ACPI
459static int acpi_num_msi;
460
461static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
462{
463	struct v2m_data *data;
464
465	if (WARN_ON(acpi_num_msi <= 0))
466		return NULL;
467
468	/* We only return the fwnode of the first MSI frame. */
469	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
470	if (!data)
471		return NULL;
472
473	return data->fwnode;
474}
475
476static bool acpi_check_amazon_graviton_quirks(void)
477{
478	static struct acpi_table_madt *madt;
479	acpi_status status;
480	bool rc = false;
481
482#define ACPI_AMZN_OEM_ID		"AMAZON"
483
484	status = acpi_get_table(ACPI_SIG_MADT, 0,
485				(struct acpi_table_header **)&madt);
486
487	if (ACPI_FAILURE(status) || !madt)
488		return rc;
489	rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
490	acpi_put_table((struct acpi_table_header *)madt);
491
492	return rc;
493}
494
495static int __init
496acpi_parse_madt_msi(union acpi_subtable_headers *header,
497		    const unsigned long end)
498{
499	int ret;
500	struct resource res;
501	u32 spi_start = 0, nr_spis = 0;
502	struct acpi_madt_generic_msi_frame *m;
503	struct fwnode_handle *fwnode;
504	u32 flags = 0;
505
506	m = (struct acpi_madt_generic_msi_frame *)header;
507	if (BAD_MADT_ENTRY(m, end))
508		return -EINVAL;
509
510	res.start = m->base_address;
511	res.end = m->base_address + SZ_4K - 1;
512	res.flags = IORESOURCE_MEM;
513
514	if (acpi_check_amazon_graviton_quirks()) {
515		pr_info("applying Amazon Graviton quirk\n");
516		res.end = res.start + SZ_8K - 1;
517		flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
518		gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
519	}
520
521	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
522		spi_start = m->spi_base;
523		nr_spis = m->spi_count;
524
525		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
526			spi_start, nr_spis);
527	}
528
529	fwnode = irq_domain_alloc_fwnode(&res.start);
530	if (!fwnode) {
531		pr_err("Unable to allocate GICv2m domain token\n");
532		return -EINVAL;
533	}
534
535	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
536	if (ret)
537		irq_domain_free_fwnode(fwnode);
538
539	return ret;
540}
541
542static int __init gicv2m_acpi_init(struct irq_domain *parent)
543{
544	int ret;
545
546	if (acpi_num_msi > 0)
547		return 0;
548
549	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
550				      acpi_parse_madt_msi, 0);
551
552	if (acpi_num_msi <= 0)
553		goto err_out;
554
555	ret = gicv2m_allocate_domains(parent);
556	if (ret)
557		goto err_out;
558
559	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);
560
561	return 0;
562
563err_out:
564	gicv2m_teardown();
565	return -EINVAL;
566}
567#else /* CONFIG_ACPI */
568static int __init gicv2m_acpi_init(struct irq_domain *parent)
569{
570	return -EINVAL;
571}
572#endif /* CONFIG_ACPI */
573
574int __init gicv2m_init(struct fwnode_handle *parent_handle,
575		       struct irq_domain *parent)
576{
577	if (is_of_node(parent_handle))
578		return gicv2m_of_init(parent_handle, parent);
579
580	return gicv2m_acpi_init(parent);
581}
v4.10.11
  1/*
  2 * ARM GIC v2m MSI(-X) support
  3 * Support for Message Signaled Interrupts for systems that
  4 * implement ARM Generic Interrupt Controller: GICv2m.
  5 *
  6 * Copyright (C) 2014 Advanced Micro Devices, Inc.
  7 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
  8 *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
  9 *	    Brandon Anderson <brandon.anderson@amd.com>
 10 *
 11 * This program is free software; you can redistribute it and/or modify it
 12 * under the terms of the GNU General Public License version 2 as published
 13 * by the Free Software Foundation.
 14 */
 15
 16#define pr_fmt(fmt) "GICv2m: " fmt
 17
 18#include <linux/acpi.h>
 19#include <linux/dma-iommu.h>
 20#include <linux/irq.h>
 21#include <linux/irqdomain.h>
 22#include <linux/kernel.h>
 23#include <linux/msi.h>
 24#include <linux/of_address.h>
 25#include <linux/of_pci.h>
 26#include <linux/slab.h>
 27#include <linux/spinlock.h>
 28#include <linux/irqchip/arm-gic.h>
 29
 30/*
 31* MSI_TYPER:
 32*     [31:26] Reserved
 33*     [25:16] lowest SPI assigned to MSI
 34*     [15:10] Reserved
  35*     [9:0]   Number of SPIs assigned to MSI
 36*/
 37#define V2M_MSI_TYPER		       0x008
 38#define V2M_MSI_TYPER_BASE_SHIFT       16
 39#define V2M_MSI_TYPER_BASE_MASK	       0x3FF
 40#define V2M_MSI_TYPER_NUM_MASK	       0x3FF
 41#define V2M_MSI_SETSPI_NS	       0x040
 42#define V2M_MIN_SPI		       32
 43#define V2M_MAX_SPI		       1019
 44#define V2M_MSI_IIDR		       0xFCC
 45
 46#define V2M_MSI_TYPER_BASE_SPI(x)      \
 47	       (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
 48
 49#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)
 50
 51/* APM X-Gene with GICv2m MSI_IIDR register value */
 52#define XGENE_GICV2M_MSI_IIDR		0x06000170
 53
 54/* Broadcom NS2 GICv2m MSI_IIDR register value */
 55#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f
 56
 57/* List of flags for specific v2m implementation */
 58#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
 59
 60static LIST_HEAD(v2m_nodes);
 61static DEFINE_SPINLOCK(v2m_lock);
 62
 63struct v2m_data {
 64	struct list_head entry;
 65	struct fwnode_handle *fwnode;
 66	struct resource res;	/* GICv2m resource */
 67	void __iomem *base;	/* GICv2m virt address */
 68	u32 spi_start;		/* The SPI number that MSIs start */
 69	u32 nr_spis;		/* The number of SPIs for MSIs */
 70	u32 spi_offset;		/* offset to be subtracted from SPI number */
 71	unsigned long *bm;	/* MSI vector bitmap */
 72	u32 flags;		/* v2m flags for specific implementation */
 73};
 74
 75static void gicv2m_mask_msi_irq(struct irq_data *d)
 76{
 77	pci_msi_mask_irq(d);
 78	irq_chip_mask_parent(d);
 79}
 80
 81static void gicv2m_unmask_msi_irq(struct irq_data *d)
 82{
 83	pci_msi_unmask_irq(d);
 84	irq_chip_unmask_parent(d);
 85}
 86
 87static struct irq_chip gicv2m_msi_irq_chip = {
 88	.name			= "MSI",
 89	.irq_mask		= gicv2m_mask_msi_irq,
 90	.irq_unmask		= gicv2m_unmask_msi_irq,
 91	.irq_eoi		= irq_chip_eoi_parent,
 92	.irq_write_msi_msg	= pci_msi_domain_write_msg,
 93};
 94
 95static struct msi_domain_info gicv2m_msi_domain_info = {
 96	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 97		   MSI_FLAG_PCI_MSIX),
 98	.chip	= &gicv2m_msi_irq_chip,
 99};
100
101static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
102{
103	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
104	phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
105
106	msg->address_hi = upper_32_bits(addr);
107	msg->address_lo = lower_32_bits(addr);
108	msg->data = data->hwirq;
109
110	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
111		msg->data -= v2m->spi_offset;
112
113	iommu_dma_map_msi_msg(data->irq, msg);
114}
115
116static struct irq_chip gicv2m_irq_chip = {
117	.name			= "GICv2m",
118	.irq_mask		= irq_chip_mask_parent,
119	.irq_unmask		= irq_chip_unmask_parent,
120	.irq_eoi		= irq_chip_eoi_parent,
121	.irq_set_affinity	= irq_chip_set_affinity_parent,
122	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
123};
124
125static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
126				       unsigned int virq,
127				       irq_hw_number_t hwirq)
128{
129	struct irq_fwspec fwspec;
130	struct irq_data *d;
131	int err;
132
133	if (is_of_node(domain->parent->fwnode)) {
134		fwspec.fwnode = domain->parent->fwnode;
135		fwspec.param_count = 3;
136		fwspec.param[0] = 0;
137		fwspec.param[1] = hwirq - 32;
138		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
139	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
140		fwspec.fwnode = domain->parent->fwnode;
141		fwspec.param_count = 2;
142		fwspec.param[0] = hwirq;
143		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
144	} else {
145		return -EINVAL;
146	}
147
148	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
149	if (err)
150		return err;
151
152	/* Configure the interrupt line to be edge */
153	d = irq_domain_get_irq_data(domain->parent, virq);
154	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
155	return 0;
156}
157
158static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
159{
160	int pos;
161
162	pos = hwirq - v2m->spi_start;
163	if (pos < 0 || pos >= v2m->nr_spis) {
164		pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
165		return;
166	}
167
168	spin_lock(&v2m_lock);
169	__clear_bit(pos, v2m->bm);
170	spin_unlock(&v2m_lock);
171}
172
173static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
174				   unsigned int nr_irqs, void *args)
175{
176	struct v2m_data *v2m = NULL, *tmp;
177	int hwirq, offset, err = 0;
178
179	spin_lock(&v2m_lock);
180	list_for_each_entry(tmp, &v2m_nodes, entry) {
181		offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
182		if (offset < tmp->nr_spis) {
183			__set_bit(offset, tmp->bm);
184			v2m = tmp;
185			break;
186		}
187	}
188	spin_unlock(&v2m_lock);
189
190	if (!v2m)
191		return -ENOSPC;
192
193	hwirq = v2m->spi_start + offset;
194
195	err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
196	if (err) {
197		gicv2m_unalloc_msi(v2m, hwirq);
198		return err;
199	}
200
201	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
202				      &gicv2m_irq_chip, v2m);
203
204	return 0;
205}
206
207static void gicv2m_irq_domain_free(struct irq_domain *domain,
208				   unsigned int virq, unsigned int nr_irqs)
209{
210	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
211	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
212
213	BUG_ON(nr_irqs != 1);
214	gicv2m_unalloc_msi(v2m, d->hwirq);
215	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
216}
217
218static const struct irq_domain_ops gicv2m_domain_ops = {
219	.alloc			= gicv2m_irq_domain_alloc,
220	.free			= gicv2m_irq_domain_free,
221};
222
223static bool is_msi_spi_valid(u32 base, u32 num)
224{
225	if (base < V2M_MIN_SPI) {
226		pr_err("Invalid MSI base SPI (base:%u)\n", base);
227		return false;
228	}
229
230	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
231		pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
232		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
233		return false;
234	}
235
236	return true;
237}
238
239static struct irq_chip gicv2m_pmsi_irq_chip = {
240	.name			= "pMSI",
241};
242
243static struct msi_domain_ops gicv2m_pmsi_ops = {
244};
245
246static struct msi_domain_info gicv2m_pmsi_domain_info = {
247	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
248	.ops	= &gicv2m_pmsi_ops,
249	.chip	= &gicv2m_pmsi_irq_chip,
250};
251
252static void gicv2m_teardown(void)
253{
254	struct v2m_data *v2m, *tmp;
255
256	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
257		list_del(&v2m->entry);
258		kfree(v2m->bm);
259		iounmap(v2m->base);
260		of_node_put(to_of_node(v2m->fwnode));
261		if (is_fwnode_irqchip(v2m->fwnode))
262			irq_domain_free_fwnode(v2m->fwnode);
263		kfree(v2m);
264	}
265}
266
267static int gicv2m_allocate_domains(struct irq_domain *parent)
268{
269	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
270	struct v2m_data *v2m;
271
272	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
273	if (!v2m)
274		return 0;
275
276	inner_domain = irq_domain_create_tree(v2m->fwnode,
277					      &gicv2m_domain_ops, v2m);
278	if (!inner_domain) {
279		pr_err("Failed to create GICv2m domain\n");
280		return -ENOMEM;
281	}
282
283	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
284	inner_domain->parent = parent;
285	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
286					       &gicv2m_msi_domain_info,
287					       inner_domain);
288	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
289						     &gicv2m_pmsi_domain_info,
290						     inner_domain);
291	if (!pci_domain || !plat_domain) {
292		pr_err("Failed to create MSI domains\n");
293		if (plat_domain)
294			irq_domain_remove(plat_domain);
295		if (pci_domain)
296			irq_domain_remove(pci_domain);
297		irq_domain_remove(inner_domain);
298		return -ENOMEM;
299	}
300
301	return 0;
302}
303
304static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
305				  u32 spi_start, u32 nr_spis,
306				  struct resource *res)
307{
308	int ret;
309	struct v2m_data *v2m;
310
311	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
312	if (!v2m) {
313		pr_err("Failed to allocate struct v2m_data.\n");
314		return -ENOMEM;
315	}
316
317	INIT_LIST_HEAD(&v2m->entry);
318	v2m->fwnode = fwnode;
319
320	memcpy(&v2m->res, res, sizeof(struct resource));
321
322	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
323	if (!v2m->base) {
324		pr_err("Failed to map GICv2m resource\n");
325		ret = -ENOMEM;
326		goto err_free_v2m;
327	}
328
329	if (spi_start && nr_spis) {
330		v2m->spi_start = spi_start;
331		v2m->nr_spis = nr_spis;
332	} else {
333		u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
334
335		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
336		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
337	}
338
339	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
340		ret = -EINVAL;
341		goto err_iounmap;
342	}
343
344	/*
345	 * APM X-Gene GICv2m implementation has an erratum where
346	 * the MSI data needs to be the offset from the spi_start
347	 * in order to trigger the correct MSI interrupt. This is
348	 * different from the standard GICv2m implementation where
349	 * the MSI data is the absolute value within the range from
350	 * spi_start to (spi_start + num_spis).
351	 *
352	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
353	 * is 'spi_number - 32'
354	 */
355	switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
356	case XGENE_GICV2M_MSI_IIDR:
357		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
358		v2m->spi_offset = v2m->spi_start;
359		break;
360	case BCM_NS2_GICV2M_MSI_IIDR:
361		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
362		v2m->spi_offset = 32;
363		break;
364	}
365
366	v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
367			  GFP_KERNEL);
368	if (!v2m->bm) {
369		ret = -ENOMEM;
370		goto err_iounmap;
371	}
372
373	list_add_tail(&v2m->entry, &v2m_nodes);
374
375	pr_info("range%pR, SPI[%d:%d]\n", res,
376		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
377	return 0;
378
379err_iounmap:
380	iounmap(v2m->base);
381err_free_v2m:
382	kfree(v2m);
383	return ret;
384}
385
386static struct of_device_id gicv2m_device_id[] = {
387	{	.compatible	= "arm,gic-v2m-frame",	},
388	{},
389};
390
391static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
392				 struct irq_domain *parent)
393{
394	int ret = 0;
395	struct device_node *node = to_of_node(parent_handle);
396	struct device_node *child;
397
398	for (child = of_find_matching_node(node, gicv2m_device_id); child;
399	     child = of_find_matching_node(child, gicv2m_device_id)) {
400		u32 spi_start = 0, nr_spis = 0;
401		struct resource res;
402
403		if (!of_find_property(child, "msi-controller", NULL))
404			continue;
405
406		ret = of_address_to_resource(child, 0, &res);
407		if (ret) {
408			pr_err("Failed to allocate v2m resource.\n");
409			break;
410		}
411
412		if (!of_property_read_u32(child, "arm,msi-base-spi",
413					  &spi_start) &&
414		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
415			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
416				spi_start, nr_spis);
417
418		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
419		if (ret) {
420			of_node_put(child);
421			break;
422		}
423	}
424
425	if (!ret)
426		ret = gicv2m_allocate_domains(parent);
427	if (ret)
428		gicv2m_teardown();
429	return ret;
430}
431
432#ifdef CONFIG_ACPI
433static int acpi_num_msi;
434
435static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
436{
437	struct v2m_data *data;
438
439	if (WARN_ON(acpi_num_msi <= 0))
440		return NULL;
441
442	/* We only return the fwnode of the first MSI frame. */
443	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
444	if (!data)
445		return NULL;
446
447	return data->fwnode;
448}
449
450static int __init
451acpi_parse_madt_msi(struct acpi_subtable_header *header,
452		    const unsigned long end)
453{
454	int ret;
455	struct resource res;
456	u32 spi_start = 0, nr_spis = 0;
457	struct acpi_madt_generic_msi_frame *m;
458	struct fwnode_handle *fwnode;
459
460	m = (struct acpi_madt_generic_msi_frame *)header;
461	if (BAD_MADT_ENTRY(m, end))
462		return -EINVAL;
463
464	res.start = m->base_address;
465	res.end = m->base_address + SZ_4K - 1;
466	res.flags = IORESOURCE_MEM;
467
468	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
469		spi_start = m->spi_base;
470		nr_spis = m->spi_count;
471
472		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
473			spi_start, nr_spis);
474	}
475
476	fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
477	if (!fwnode) {
478		pr_err("Unable to allocate GICv2m domain token\n");
479		return -EINVAL;
480	}
481
482	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
483	if (ret)
484		irq_domain_free_fwnode(fwnode);
485
486	return ret;
487}
488
489static int __init gicv2m_acpi_init(struct irq_domain *parent)
490{
491	int ret;
492
493	if (acpi_num_msi > 0)
494		return 0;
495
496	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
497				      acpi_parse_madt_msi, 0);
498
499	if (acpi_num_msi <= 0)
500		goto err_out;
501
502	ret = gicv2m_allocate_domains(parent);
503	if (ret)
504		goto err_out;
505
506	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);
507
508	return 0;
509
510err_out:
511	gicv2m_teardown();
512	return -EINVAL;
513}
514#else /* CONFIG_ACPI */
515static int __init gicv2m_acpi_init(struct irq_domain *parent)
516{
517	return -EINVAL;
518}
519#endif /* CONFIG_ACPI */
520
521int __init gicv2m_init(struct fwnode_handle *parent_handle,
522		       struct irq_domain *parent)
523{
524	if (is_of_node(parent_handle))
525		return gicv2m_of_init(parent_handle, parent);
526
527	return gicv2m_acpi_init(parent);
528}