v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define     PRIORITY_PER_ID		4
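
/*
 * Worked example (illustrative, not part of the driver): the priority
 * register for interrupt source 42 lives at
 * PRIORITY_BASE + 42 * PRIORITY_PER_ID = 0 + 168 = offset 0xa8 from regs.
 */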

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define     CONTEXT_ENABLE_SIZE		0x80
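
/*
 * Worked example (illustrative): the enable bit for hwirq 42 in context 3
 * sits in the word at CONTEXT_ENABLE_BASE + 3 * CONTEXT_ENABLE_SIZE +
 * (42 / 32) * 4 = 0x2000 + 0x180 + 0x4 = offset 0x2184, at bit position
 * 42 % 32 = 10; __plic_toggle() below does exactly this arithmetic.
 */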

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define     CONTEXT_SIZE		0x1000
#define     CONTEXT_THRESHOLD		0x00
#define     CONTEXT_CLAIM		0x04

#define	PLIC_DISABLE_THRESHOLD		0x7
#define	PLIC_ENABLE_THRESHOLD		0
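
/*
 * Illustrative consequence of the two thresholds above: since every source
 * priority is hardwired to 1, a threshold of 0 lets all enabled interrupts
 * through (1 > 0), while a threshold of 7 parks the context by blocking
 * them all (1 <= 7); see plic_set_threshold() below.
 */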

#define PLIC_QUIRK_EDGE_INTERRUPT	0

struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};
static int plic_parent_irq __ro_after_init;
static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	raw_spin_lock(&handler->enable_lock);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_disable(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	if (!irqd_irq_disabled(d))
		plic_irq_enable(d);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}

static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++)
		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
			__set_bit(i, priv->prio_save);
		else
			__clear_bit(i, priv->prio_save);

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock(&handler->enable_lock);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock(&handler->enable_lock);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock(&handler->enable_lock);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock(&handler->enable_lock);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
	}

	chained_irq_exit(chip, desc);
}
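
/*
 * Minimal claim/complete sketch (illustrative only), mirroring the loop
 * above from a bare-metal point of view:
 *
 *	while ((hwirq = readl(claim))) {	// claim highest-priority pending
 *		handle(hwirq);			// dispatch to the handler
 *		writel(hwirq, claim);		// complete by writing the ID back
 *	}
 *
 * In this driver the completion write is not inline as shown here; it is
 * issued from plic_irq_eoi(), driven by the Linux flow handler (fasteoi
 * completion or edge ack).
 */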

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static int __init __plic_init(struct device_node *node,
			      struct device_node *parent,
			      unsigned long plic_quirks)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
	struct plic_priv *priv;
	struct plic_handler *handler;
	unsigned int cpu;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->plic_quirks = plic_quirks;

	priv->regs = of_iomap(node, 0);
	if (WARN_ON(!priv->regs)) {
		error = -EIO;
		goto out_free_priv;
	}

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	priv->nr_irqs = nr_irqs;

	priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
	if (!priv->prio_save)
		goto out_free_priority_reg;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_free_priority_reg;

	error = -ENOMEM;
	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto out_free_priority_reg;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		irq_hw_number_t hwirq;
		int cpu;
		unsigned long hartid;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent.args[0] != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		error = riscv_of_parent_hartid(parent.np, &hartid);
		if (error < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/* Find parent domain and register chained handler */
		if (!plic_parent_irq && irq_find_host(parent.np)) {
			plic_parent_irq = irq_of_parse_and_map(node, i);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq,
							plic_handle_irq);
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
			i * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
			i * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
					       sizeof(*handler->enable_save), GFP_KERNEL);
		if (!handler->enable_save)
			goto out_free_enable_reg;
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	/*
	 * We can have multiple PLIC instances so setup cpuhp state
	 * and register syscore operations only when context handler
	 * for current/boot CPU is present.
	 */
	handler = this_cpu_ptr(&plic_handlers);
	if (handler->present && !plic_cpuhp_setup_done) {
		cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
				  "irqchip/sifive/plic:starting",
				  plic_starting_cpu, plic_dying_cpu);
		register_syscore_ops(&plic_irq_syscore_ops);
		plic_cpuhp_setup_done = true;
	}

	pr_info("%pOFP: mapped %d interrupts with %d handlers for"
		" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
	return 0;

out_free_enable_reg:
	for_each_cpu(cpu, cpu_present_mask) {
		handler = per_cpu_ptr(&plic_handlers, cpu);
		kfree(handler->enable_save);
	}
out_free_priority_reg:
	kfree(priv->prio_save);
out_iounmap:
	iounmap(priv->regs);
out_free_priv:
	kfree(priv);
	return error;
}

static int __init plic_init(struct device_node *node,
			    struct device_node *parent)
{
	return __plic_init(node, parent, 0);
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */

static int __init plic_edge_init(struct device_node *node,
				 struct device_node *parent)
{
	return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT));
}

IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init);
IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init);

v6.9.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define     PRIORITY_PER_ID		4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define     CONTEXT_ENABLE_SIZE		0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define     CONTEXT_SIZE		0x1000
#define     CONTEXT_THRESHOLD		0x00
#define     CONTEXT_CLAIM		0x04

#define	PLIC_DISABLE_THRESHOLD		0x7
#define	PLIC_ENABLE_THRESHOLD		0

#define PLIC_QUIRK_EDGE_INTERRUPT	0

struct plic_priv {
	struct device *dev;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};
static int plic_parent_irq __ro_after_init;
static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_disable(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	if (!irqd_irq_disabled(d))
		plic_irq_enable(d);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}

static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++)
		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
			__set_bit(i, priv->prio_save);
		else
			__clear_bit(i, priv->prio_save);

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			dev_warn_ratelimited(handler->priv->dev,
					     "can't find mapping for hwirq %lu\n", hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};
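
/*
 * Illustrative: for a quirked match such as "thead,c900-plic", the .data
 * bit lands in priv->plic_quirks (see plic_probe() below), which makes
 * plic_irq_domain_translate() expect two interrupt cells, hwirq plus
 * trigger type; e.g. a hypothetical consumer node could specify:
 *
 *	interrupts = <27 IRQ_TYPE_EDGE_RISING>;
 */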

static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev,
					   u32 *nr_irqs, u32 *nr_contexts)
{
	struct device *dev = &pdev->dev;
	int rc;

	/*
	 * Currently only an OF fwnode is supported; extend this
	 * function for ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		dev_err(dev, "riscv,ndev property not available\n");
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(dev->fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		dev_err(dev, "no PLIC context available\n");
		return -EINVAL;
	}

	return 0;
}

static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
				     u32 *parent_hwirq, int *parent_cpu)
{
	struct device *dev = &pdev->dev;
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	/*
	 * Currently only an OF fwnode is supported; extend this
	 * function for ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}
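
/*
 * Illustrative device tree layout (hypothetical values): each PLIC context
 * is one entry of the interrupts-extended property parsed above, e.g.
 *
 *	interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
 *			      <&cpu1_intc 11>, <&cpu1_intc 9>;
 *
 * Contexts 0 and 2 target M-mode external interrupts (11), contexts 1 and 3
 * S-mode external interrupts (9). On an S-mode kernel RV_IRQ_EXT is 9, so
 * only the S-mode contexts are used; under CONFIG_RISCV_M_MODE it is 11.
 */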

static int plic_probe(struct platform_device *pdev)
{
	int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
	struct device *dev = &pdev->dev;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, parent_hwirq;
	struct irq_domain *domain;
	struct plic_priv *priv;
	irq_hw_number_t hwirq;
	bool cpuhp_setup;

	if (is_of_node(dev->fwnode)) {
		const struct of_device_id *id;

		id = of_match_node(plic_match, to_of_node(dev->fwnode));
		if (id)
			plic_quirks = (unsigned long)id->data;
	}

	error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts);
	if (error)
		return error;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (WARN_ON(!priv->regs))
		return -EIO;

	priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
	if (!priv->prio_save)
		return -ENOMEM;

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu);
		if (error) {
			dev_warn(dev, "hwirq for context%d not found\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			dev_warn(dev, "Invalid cpuid for context %d\n", i);
			continue;
		}

		/* Find parent domain and register chained handler */
		domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
		if (!plic_parent_irq && domain) {
			plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			dev_warn(dev, "handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
			i * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
			i * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32),
						    sizeof(*handler->enable_save), GFP_KERNEL);
		if (!handler->enable_save)
			goto fail_cleanup_contexts;
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1,
						&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto fail_cleanup_contexts;

	/*
	 * We can have multiple PLIC instances so setup cpuhp state
	 * and register syscore operations only once after context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_cpuhp_setup_done) {
		cpuhp_setup = true;
		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				cpuhp_setup = false;
				break;
			}
		}
		if (cpuhp_setup) {
			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_cpuhp_setup_done = true;
		}
	}

	dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n",
		 nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	return -ENOMEM;
}

static struct platform_driver plic_driver = {
	.driver = {
		.name		= "riscv-plic",
		.of_match_table	= plic_match,
	},
	.probe = plic_probe,
};
builtin_platform_driver(plic_driver);