// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/types.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

/*
 * Overall diagram of the Armada XP interrupt controller:
 *
 *    To CPU 0                 To CPU 1
 *
 *       /\                       /\
 *       ||                       ||
 * +---------------+     +---------------+
 * |               |     |               |
 * |    per-CPU    |     |    per-CPU    |
 * |  mask/unmask  |     |  mask/unmask  |
 * |     CPU0      |     |     CPU1      |
 * |               |     |               |
 * +---------------+     +---------------+
 *        /\                       /\
 *        ||                       ||
 *        \\_______________________//
 *                     ||
 *            +-------------------+
 *            |                   |
 *            | Global interrupt  |
 *            |    mask/unmask    |
 *            |                   |
 *            +-------------------+
 *                     /\
 *                     ||
 *               interrupt from
 *                   device
 *
 * The "global interrupt mask/unmask" is modified using the
 * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE
 * registers, which are relative to "mpic->base".
 *
 * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
 * and MPIC_INT_CLEAR_MASK registers, which are relative to
 * "mpic->per_cpu". This base address points to a special address,
 * which automatically accesses the registers of the current CPU.
 *
 * The per-CPU mask/unmask can also be adjusted using the global
 * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to
 * configure interrupt affinity.
 *
 * Due to this model, all interrupts need to be masked/unmasked at
 * two different levels: at the global level and at the per-CPU level.
 *
 * This driver takes the following approach to deal with this:
 *
 *  - For global interrupts:
 *
 *    At ->map() time, a global interrupt is unmasked at the per-CPU
 *    mask/unmask level. It is therefore unmasked at this level for
 *    the current CPU, running the ->map() code. This ensures that
 *    the interrupt is unmasked at this level in non-SMP
 *    configurations. In SMP configurations, the ->set_affinity()
 *    callback is called, which, using MPIC_INT_SOURCE_CTL(),
 *    readjusts the per-CPU mask/unmask for the interrupt.
 *
 *    The ->mask() and ->unmask() operations only mask/unmask the
 *    interrupt at the "global" level.
 *
 *    So, a global interrupt is enabled at the per-CPU level as soon
 *    as it is mapped. At run time, the masking/unmasking takes place
 *    at the global level.
 *
 *  - For per-CPU interrupts
 *
 *    At ->map() time, a per-CPU interrupt is unmasked at the global
 *    mask/unmask level.
 *
 *    The ->mask() and ->unmask() operations mask/unmask the interrupt
 *    at the per-CPU level.
 *
 *    So, a per-CPU interrupt is enabled at the global level as soon
 *    as it is mapped. At run time, the masking/unmasking takes place
 *    at the per-CPU level.
 */

/* Registers relative to mpic->base */
#define MPIC_INT_CONTROL			0x00
#define MPIC_INT_CONTROL_NUMINT_MASK		GENMASK(12, 2)
#define MPIC_SW_TRIG_INT			0x04
#define MPIC_INT_SET_ENABLE			0x30
#define MPIC_INT_CLEAR_ENABLE			0x34
#define MPIC_INT_SOURCE_CTL(hwirq)		(0x100 + (hwirq) * 4)
#define MPIC_INT_SOURCE_CPU_MASK		GENMASK(3, 0)
#define MPIC_INT_IRQ_FIQ_MASK(cpuid)		((BIT(0) | BIT(8)) << (cpuid))

/* Registers relative to mpic->per_cpu */
#define MPIC_IN_DRBEL_CAUSE			0x08
#define MPIC_IN_DRBEL_MASK			0x0c
#define MPIC_PPI_CAUSE				0x10
#define MPIC_CPU_INTACK				0x44
#define MPIC_CPU_INTACK_IID_MASK		GENMASK(9, 0)
#define MPIC_INT_SET_MASK			0x48
#define MPIC_INT_CLEAR_MASK			0x4C
#define MPIC_INT_FABRIC_MASK			0x54
#define MPIC_INT_CAUSE_PERF(cpu)		BIT(cpu)

#define MPIC_PER_CPU_IRQS_NR			29

/* IPI and MSI interrupt definitions for IPI platforms */
#define IPI_DOORBELL_NR				8
#define IPI_DOORBELL_MASK			GENMASK(7, 0)
#define PCI_MSI_DOORBELL_START			16
#define PCI_MSI_DOORBELL_NR			16
#define PCI_MSI_DOORBELL_MASK			GENMASK(31, 16)

/* MSI interrupt definitions for non-IPI platforms */
#define PCI_MSI_FULL_DOORBELL_START		0
#define PCI_MSI_FULL_DOORBELL_NR		32
#define PCI_MSI_FULL_DOORBELL_MASK		GENMASK(31, 0)
#define PCI_MSI_FULL_DOORBELL_SRC0_MASK		GENMASK(15, 0)
#define PCI_MSI_FULL_DOORBELL_SRC1_MASK		GENMASK(31, 16)

/**
 * struct mpic - MPIC private data structure
 * @base:		MPIC registers base address
 * @per_cpu:		per-CPU registers base address
 * @parent_irq:		parent IRQ if MPIC is not top-level interrupt controller
 * @domain:		MPIC main interrupt domain
 * @ipi_domain:		IPI domain
 * @msi_domain:		MSI domain
 * @msi_inner_domain:	MSI inner domain
 * @msi_used:		bitmap of used MSI numbers
 * @msi_lock:		mutex serializing access to @msi_used
 * @msi_doorbell_addr:	physical address of MSI doorbell register
 * @msi_doorbell_mask:	mask of available doorbell bits for MSIs (either PCI_MSI_DOORBELL_MASK or
 *			PCI_MSI_FULL_DOORBELL_MASK)
 * @msi_doorbell_start:	first set bit in @msi_doorbell_mask
 * @msi_doorbell_size:	number of set bits in @msi_doorbell_mask
 * @doorbell_mask:	doorbell mask of MSIs and IPIs, stored on suspend, restored on resume
 */
struct mpic {
	void __iomem *base;
	void __iomem *per_cpu;
	int parent_irq;
	struct irq_domain *domain;
#ifdef CONFIG_SMP
	struct irq_domain *ipi_domain;
#endif
#ifdef CONFIG_PCI_MSI
	struct irq_domain *msi_domain;
	struct irq_domain *msi_inner_domain;
	DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
	struct mutex msi_lock;
	phys_addr_t msi_doorbell_addr;
	u32 msi_doorbell_mask;
	unsigned int msi_doorbell_start, msi_doorbell_size;
#endif
	u32 doorbell_mask;
};

static struct mpic *mpic_data __ro_after_init;

static inline bool mpic_is_ipi_available(struct mpic *mpic)
{
	/*
	 * We distinguish IPI availability in the IC by the IC not having a
	 * parent irq defined. If a parent irq is defined, there is a parent
	 * interrupt controller (e.g. GIC) that takes care of inter-processor
	 * interrupts.
	 */
	return mpic->parent_irq <= 0;
}

static inline bool mpic_is_percpu_irq(irq_hw_number_t hwirq)
{
	return hwirq < MPIC_PER_CPU_IRQS_NR;
}

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask the global enable bit.
 * For per-CPU interrupts, mask/unmask the calling CPU's bit.
 */
static void mpic_irq_mask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (!mpic_is_percpu_irq(hwirq))
		writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
	else
		writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
}

static void mpic_irq_unmask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (!mpic_is_percpu_irq(hwirq))
		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
	else
		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}

#ifdef CONFIG_PCI_MSI

static struct irq_chip mpic_msi_irq_chip = {
	.name		= "MPIC MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info mpic_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.chip	= &mpic_msi_irq_chip,
};

static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	struct mpic *mpic = irq_data_get_irq_chip_data(d);

	msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
	msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
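	/*
	 * Doorbell payload: BIT(cpu + 8) selects the destination CPU and
	 * the low bits carry the doorbell number, mirroring the
	 * (map << 8) | hwirq encoding used by mpic_ipi_send_mask().
	 */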
	msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start);
}

static int mpic_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
	unsigned int cpu;

	if (!force)
		cpu = cpumask_any_and(mask, cpu_online_mask);
	else
		cpu = cpumask_first(mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static struct irq_chip mpic_msi_bottom_irq_chip = {
	.name			= "MPIC MSI",
	.irq_compose_msi_msg	= mpic_compose_msi_msg,
	.irq_set_affinity	= mpic_msi_set_affinity,
};

static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
			  void *args)
{
	struct mpic *mpic = domain->host_data;
	int hwirq;

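	/*
	 * PCI multi-MSI requires a naturally aligned, power-of-two block
	 * of vectors, hence the order_base_2() region allocation.
	 */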
	mutex_lock(&mpic->msi_lock);
	hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size,
					order_base_2(nr_irqs));
	mutex_unlock(&mpic->msi_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (unsigned int i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mpic_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}

	return 0;
}

static void mpic_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mpic *mpic = domain->host_data;

	mutex_lock(&mpic->msi_lock);
	bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs));
	mutex_unlock(&mpic->msi_lock);
}

static const struct irq_domain_ops mpic_msi_domain_ops = {
	.alloc	= mpic_msi_alloc,
	.free	= mpic_msi_free,
};

static void mpic_msi_reenable_percpu(struct mpic *mpic)
{
	u32 reg;

	/* Enable the MSI doorbell bits in the per-CPU doorbell mask */
	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
	reg |= mpic->msi_doorbell_mask;
	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	/* Unmask local doorbell interrupt */
	writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}

static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node,
				phys_addr_t main_int_phys_base)
{
	mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;

	mutex_init(&mpic->msi_lock);

	if (mpic_is_ipi_available(mpic)) {
		mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START;
		mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR;
		mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK;
	} else {
		mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START;
		mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR;
		mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK;
	}

	mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size,
						       &mpic_msi_domain_ops, mpic);
	if (!mpic->msi_inner_domain)
		return -ENOMEM;

	mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info,
						     mpic->msi_inner_domain);
	if (!mpic->msi_domain) {
		irq_domain_remove(mpic->msi_inner_domain);
		return -ENOMEM;
	}

	mpic_msi_reenable_percpu(mpic);

	/* Unmask low 16 MSI irqs on non-IPI platforms */
	if (!mpic_is_ipi_available(mpic))
		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);

	return 0;
}
#else
static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {}

static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node,
				phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif

static void mpic_perf_init(struct mpic *mpic)
{
	u32 cpuid;

	/*
	 * This Performance Counter Overflow interrupt is specific to
	 * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
	 */
	if (!of_machine_is_compatible("marvell,armada-370-xp"))
		return;

	cpuid = cpu_logical_map(smp_processor_id());

	/* Enable Performance Counter Overflow interrupts */
	writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
}

#ifdef CONFIG_SMP
static void mpic_ipi_mask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	u32 reg;

	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
	reg &= ~BIT(d->hwirq);
	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}

static void mpic_ipi_unmask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	u32 reg;

	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
	reg |= BIT(d->hwirq);
	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}

static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	unsigned int cpu;
	u32 map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= BIT(cpu_logical_map(cpu));

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Trigger the software interrupt (doorbell) on the selected CPUs */
	writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
}

static void mpic_ipi_ack(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);

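	/* The cause register is write-zero-to-clear: ack only this doorbell's bit */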
	writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
}

static struct irq_chip mpic_ipi_irqchip = {
	.name		= "IPI",
	.irq_ack	= mpic_ipi_ack,
	.irq_mask	= mpic_ipi_mask,
	.irq_unmask	= mpic_ipi_unmask,
	.ipi_send_mask	= mpic_ipi_send_mask,
};

static int mpic_ipi_alloc(struct irq_domain *d, unsigned int virq,
			  unsigned int nr_irqs, void *args)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		irq_set_percpu_devid(virq + i);
		irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static void mpic_ipi_free(struct irq_domain *d, unsigned int virq,
			  unsigned int nr_irqs)
{
	/* Not freeing IPIs */
}

static const struct irq_domain_ops mpic_ipi_domain_ops = {
	.alloc	= mpic_ipi_alloc,
	.free	= mpic_ipi_free,
};

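/* Re-unmask IPIs that were enabled before suspend; also run when a CPU comes online */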
static void mpic_ipi_resume(struct mpic *mpic)
{
	for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) {
		unsigned int virq = irq_find_mapping(mpic->ipi_domain, i);
		struct irq_data *d;

		if (!virq || !irq_percpu_is_enabled(virq))
			continue;

		d = irq_domain_get_irq_data(mpic->ipi_domain, virq);
		mpic_ipi_unmask(d);
	}
}

static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node)
{
	int base_ipi;

	mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR,
						    &mpic_ipi_domain_ops, mpic);
	if (WARN_ON(!mpic->ipi_domain))
		return -ENOMEM;

	irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI);
	base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL);
	if (WARN_ON(!base_ipi))
		return -ENOMEM;

	set_smp_ipi_range(base_ipi, IPI_DOORBELL_NR);

	return 0;
}

static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned int cpu;

	/* Select a single online core from the affinity mask */
	cpu = cpumask_any_and(mask_val, cpu_online_mask);

	atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
			 MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static void mpic_smp_cpu_init(struct mpic *mpic)
{
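	/* Start by masking all interrupts at this CPU's per-CPU mask level */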
	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
		writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);

	if (!mpic_is_ipi_available(mpic))
		return;

	/* Disable all IPIs */
	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	/* Clear pending IPIs */
	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);

	/* Unmask IPI interrupt */
	writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}

static void mpic_reenable_percpu(struct mpic *mpic)
{
	/* Re-enable per-CPU interrupts that were enabled before suspend */
	for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) {
		unsigned int virq = irq_linear_revmap(mpic->domain, i);
		struct irq_data *d;

		if (!virq || !irq_percpu_is_enabled(virq))
			continue;

		d = irq_get_irq_data(virq);
		mpic_irq_unmask(d);
	}

	if (mpic_is_ipi_available(mpic))
		mpic_ipi_resume(mpic);

	mpic_msi_reenable_percpu(mpic);
}

static int mpic_starting_cpu(unsigned int cpu)
{
	struct mpic *mpic = irq_get_default_host()->host_data;

	mpic_perf_init(mpic);
	mpic_smp_cpu_init(mpic);
	mpic_reenable_percpu(mpic);

	return 0;
}

static int mpic_cascaded_starting_cpu(unsigned int cpu)
{
	struct mpic *mpic = mpic_data;

	mpic_perf_init(mpic);
	mpic_reenable_percpu(mpic);
	enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE);

	return 0;
}
#else
static void mpic_smp_cpu_init(struct mpic *mpic) {}
static void mpic_ipi_resume(struct mpic *mpic) {}
#endif

static struct irq_chip mpic_irq_chip = {
	.name		= "MPIC",
	.irq_mask	= mpic_irq_mask,
	.irq_mask_ack	= mpic_irq_mask,
	.irq_unmask	= mpic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = mpic_set_affinity,
#endif
	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};

static int mpic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq)
{
	struct mpic *mpic = domain->host_data;

	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
	if (hwirq <= 1)
		return -EINVAL;

	irq_set_chip_data(virq, mpic);

	mpic_irq_mask(irq_get_irq_data(virq));
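	/*
	 * Per the scheme described at the top of this file: unmask global
	 * interrupts at the per-CPU level, and enable per-CPU interrupts
	 * at the global level.
	 */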
	if (!mpic_is_percpu_irq(hwirq))
		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
	else
		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (mpic_is_percpu_irq(hwirq)) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_level_irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	}
	irq_set_probe(virq);
	return 0;
}

static const struct irq_domain_ops mpic_irq_ops = {
	.map	= mpic_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

#ifdef CONFIG_PCI_MSI
static void mpic_handle_msi_irq(struct mpic *mpic)
{
	unsigned long cause;
	unsigned int i;

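	/* Read the pending MSI doorbells and ack them (write-zero-to-clear) */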
	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
	cause &= mpic->msi_doorbell_mask;
	writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);

	for_each_set_bit(i, &cause, BITS_PER_LONG)
		generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start);
}
#else
static void mpic_handle_msi_irq(struct mpic *mpic) {}
#endif

#ifdef CONFIG_SMP
static void mpic_handle_ipi_irq(struct mpic *mpic)
{
	unsigned long cause;
	irq_hw_number_t i;

	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
	cause &= IPI_DOORBELL_MASK;

	for_each_set_bit(i, &cause, IPI_DOORBELL_NR)
		generic_handle_domain_irq(mpic->ipi_domain, i);
}
#else
static inline void mpic_handle_ipi_irq(struct mpic *mpic) {}
#endif

static void mpic_handle_cascade_irq(struct irq_desc *desc)
{
	struct mpic *mpic = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long cause;
	u32 irqsrc, cpuid;
	irq_hw_number_t i;

	chained_irq_enter(chip, desc);

	cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
	cpuid = cpu_logical_map(smp_processor_id());

	for_each_set_bit(i, &cause, MPIC_PER_CPU_IRQS_NR) {
		irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i));

		/*
		 * Skip the interrupt if it is masked on the current CPU.
		 * Test the IRQ (bits 0-1) and FIQ (bits 8-9) mask bits.
		 */
		if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid)))
			continue;

		if (i == 0 || i == 1) {
			mpic_handle_msi_irq(mpic);
			continue;
		}

		generic_handle_domain_irq(mpic->domain, i);
	}

	chained_irq_exit(chip, desc);
}

static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
{
	struct mpic *mpic = irq_get_default_host()->host_data;
	irq_hw_number_t i;
	u32 irqstat;

	do {
		irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
		i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);

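		/* An interrupt ID above 1022 indicates that nothing is pending */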
		if (i > 1022)
			break;

		if (i > 1)
			generic_handle_domain_irq(mpic->domain, i);

		/* MSI handling */
		if (i == 1)
			mpic_handle_msi_irq(mpic);

		/* IPI handling */
		if (i == 0)
			mpic_handle_ipi_irq(mpic);
	} while (1);
}

static int mpic_suspend(void)
{
	struct mpic *mpic = mpic_data;

	mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	return 0;
}

static void mpic_resume(void)
{
	struct mpic *mpic = mpic_data;
	bool src0, src1;

	/* Re-enable interrupts */
	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
		unsigned int virq = irq_linear_revmap(mpic->domain, i);
		struct irq_data *d;

		if (!virq)
			continue;

		d = irq_get_irq_data(virq);

		if (!mpic_is_percpu_irq(i)) {
			/* Non per-CPU interrupts */
			writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
			if (!irqd_irq_disabled(d))
				mpic_irq_unmask(d);
		} else {
			/* Per-CPU interrupts */
			writel(i, mpic->base + MPIC_INT_SET_ENABLE);

			/*
			 * Re-enable on the current CPU, mpic_reenable_percpu()
			 * will take care of secondary CPUs when they come up.
			 */
			if (irq_percpu_is_enabled(virq))
				mpic_irq_unmask(d);
		}
	}

	/* Reconfigure doorbells for IPIs and MSIs */
	writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	if (mpic_is_ipi_available(mpic)) {
		src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK;
		src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK;
	} else {
		src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
		src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
	}

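	/* Per-CPU hwirqs 0 and 1 are the summary interrupts for the two doorbell groups */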
	if (src0)
		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
	if (src1)
		writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);

	if (mpic_is_ipi_available(mpic))
		mpic_ipi_resume(mpic);
}

static struct syscore_ops mpic_syscore_ops = {
	.suspend	= mpic_suspend,
	.resume		= mpic_resume,
};

static int __init mpic_map_region(struct device_node *np, int index,
				  void __iomem **base, phys_addr_t *phys_base)
{
	struct resource res;
	int err;

	err = of_address_to_resource(np, index, &res);
	if (WARN_ON(err))
		goto fail;

	if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) {
		err = -EBUSY;
		goto fail;
	}

	*base = ioremap(res.start, resource_size(&res));
	if (WARN_ON(!*base)) {
		err = -ENOMEM;
		goto fail;
	}

	if (phys_base)
		*phys_base = res.start;

	return 0;

fail:
	pr_err("%pOF: Unable to map resource %d: %pe\n", np, index, ERR_PTR(err));
	return err;
}

static int __init mpic_of_init(struct device_node *node, struct device_node *parent)
{
	phys_addr_t phys_base;
	unsigned int nr_irqs;
	struct mpic *mpic;
	int err;

	mpic = kzalloc(sizeof(*mpic), GFP_KERNEL);
	if (WARN_ON(!mpic))
		return -ENOMEM;

	mpic_data = mpic;

	err = mpic_map_region(node, 0, &mpic->base, &phys_base);
	if (err)
		return err;

	err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
	if (err)
		return err;

	nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL));

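	/* Start from a clean state: disable all interrupts at the global level */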
	for (irq_hw_number_t i = 0; i < nr_irqs; i++)
		writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE);

	/*
	 * Initialize mpic->parent_irq before calling any other functions, since
	 * it is used to distinguish between IPI and non-IPI platforms.
	 */
	mpic->parent_irq = irq_of_parse_and_map(node, 0);

	/*
	 * On non-IPI platforms the driver currently supports only the per-CPU
	 * interrupts (the first 29 interrupts). See mpic_handle_cascade_irq().
	 */
	if (!mpic_is_ipi_available(mpic))
		nr_irqs = MPIC_PER_CPU_IRQS_NR;

	mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic);
	if (!mpic->domain) {
		pr_err("%pOF: Unable to add IRQ domain\n", node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED);

	/* Setup for the boot CPU */
	mpic_perf_init(mpic);
	mpic_smp_cpu_init(mpic);

	err = mpic_msi_init(mpic, node, phys_base);
	if (err) {
		pr_err("%pOF: Unable to initialize MSI domain\n", node);
		return err;
	}

	if (mpic_is_ipi_available(mpic)) {
		irq_set_default_host(mpic->domain);
		set_handle_irq(mpic_handle_irq);
#ifdef CONFIG_SMP
		err = mpic_ipi_init(mpic, node);
		if (err) {
			pr_err("%pOF: Unable to initialize IPI domain\n", node);
			return err;
		}

		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
					  "irqchip/armada/ipi:starting",
					  mpic_starting_cpu, NULL);
#endif
	} else {
#ifdef CONFIG_SMP
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
					  "irqchip/armada/cascade:starting",
					  mpic_cascaded_starting_cpu, NULL);
#endif
		irq_set_chained_handler_and_data(mpic->parent_irq,
						 mpic_handle_cascade_irq, mpic);
	}

	register_syscore_ops(&mpic_syscore_ops);

	return 0;
}

IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init);