/* drivers/irqchip/irq-riscv-intc.c — first copy, as of Linux v6.8 */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 * Copyright (C) 2017-2018 SiFive
  5 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
  6 */
  7
  8#define pr_fmt(fmt) "riscv-intc: " fmt
  9#include <linux/acpi.h>
 10#include <linux/atomic.h>
 11#include <linux/bits.h>
 12#include <linux/cpu.h>
 13#include <linux/irq.h>
 14#include <linux/irqchip.h>
 15#include <linux/irqdomain.h>
 16#include <linux/interrupt.h>
 17#include <linux/module.h>
 18#include <linux/of.h>
 19#include <linux/smp.h>
 
 
 
 20
/* Root IRQ domain for the per-hart local interrupts; created once on the boot hart. */
static struct irq_domain *intc_domain;
 23static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
 24{
 25	unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
 26
 27	if (unlikely(cause >= BITS_PER_LONG))
 28		panic("unexpected interrupt cause");
 
 
 
 
 
 29
 30	generic_handle_domain_irq(intc_domain, cause);
 
 31}
 32
 33/*
 34 * On RISC-V systems local interrupts are masked or unmasked by writing
 35 * the SIE (Supervisor Interrupt Enable) CSR.  As CSRs can only be written
 36 * on the local hart, these functions can only be called on the hart that
 37 * corresponds to the IRQ chip.
 38 */
 39
 40static void riscv_intc_irq_mask(struct irq_data *d)
 41{
 42	csr_clear(CSR_IE, BIT(d->hwirq));
 
 
 
 43}
 44
 45static void riscv_intc_irq_unmask(struct irq_data *d)
 46{
 47	csr_set(CSR_IE, BIT(d->hwirq));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 48}
 49
 50static void riscv_intc_irq_eoi(struct irq_data *d)
 51{
 52	/*
 53	 * The RISC-V INTC driver uses handle_percpu_devid_irq() flow
 54	 * for the per-HART local interrupts and child irqchip drivers
 55	 * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement
 56	 * chained handlers for the per-HART local interrupts.
 57	 *
 58	 * In the absence of irq_eoi(), the chained_irq_enter() and
 59	 * chained_irq_exit() functions (used by child irqchip drivers)
 60	 * will do unnecessary mask/unmask of per-HART local interrupts
 61	 * at the time of handling interrupts. To avoid this, we provide
 62	 * an empty irq_eoi() callback for RISC-V INTC irqchip.
 63	 */
 64}
 65
/* irqchip ops for the standard RISC-V local interrupts (SIE-based mask/unmask). */
static struct irq_chip riscv_intc_chip = {
	.name = "RISC-V INTC",
	.irq_mask = riscv_intc_irq_mask,
	.irq_unmask = riscv_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
 72
 
 
 
 
 
 
 
/*
 * Map one hwirq into the domain: mark the Linux irq as per-CPU devid and
 * install the percpu flow handler with the RISC-V INTC chip.
 */
static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	irq_set_percpu_devid(irq);
	irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
			    handle_percpu_devid_irq, NULL, NULL);

	return 0;
}
 82
 83static int riscv_intc_domain_alloc(struct irq_domain *domain,
 84				   unsigned int virq, unsigned int nr_irqs,
 85				   void *arg)
 86{
 87	int i, ret;
 88	irq_hw_number_t hwirq;
 89	unsigned int type = IRQ_TYPE_NONE;
 90	struct irq_fwspec *fwspec = arg;
 91
 92	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
 93	if (ret)
 94		return ret;
 95
 
 
 
 
 
 
 
 
 
 96	for (i = 0; i < nr_irqs; i++) {
 97		ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
 98		if (ret)
 99			return ret;
100	}
101
102	return 0;
103}
104
/* Domain ops: single-cell DT translation, per-hwirq mapping, range alloc. */
static const struct irq_domain_ops riscv_intc_domain_ops = {
	.map	= riscv_intc_domain_map,
	.xlate	= irq_domain_xlate_onecell,
	.alloc	= riscv_intc_domain_alloc
};
110
/* Accessor handed to the arch code so children can find the INTC fwnode. */
static struct fwnode_handle *riscv_intc_hwnode(void)
{
	return intc_domain->fwnode;
}
115
116static int __init riscv_intc_init_common(struct fwnode_handle *fn)
117{
118	int rc;
119
120	intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
121					       &riscv_intc_domain_ops, NULL);
122	if (!intc_domain) {
123		pr_err("unable to add IRQ domain\n");
124		return -ENXIO;
125	}
126
127	rc = set_handle_irq(&riscv_intc_irq);
 
 
 
 
 
128	if (rc) {
129		pr_err("failed to set irq handler\n");
130		return rc;
131	}
132
133	riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
134
135	pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
 
 
 
 
136
137	return 0;
138}
139
140static int __init riscv_intc_init(struct device_node *node,
141				  struct device_node *parent)
142{
143	int rc;
144	unsigned long hartid;
 
145
146	rc = riscv_of_parent_hartid(node, &hartid);
147	if (rc < 0) {
148		pr_warn("unable to find hart id for %pOF\n", node);
149		return 0;
150	}
151
152	/*
153	 * The DT will have one INTC DT node under each CPU (or HART)
154	 * DT node so riscv_intc_init() function will be called once
155	 * for each INTC DT node. We only need to do INTC initialization
156	 * for the INTC DT node belonging to boot CPU (or boot HART).
157	 */
158	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
159		/*
160		 * The INTC nodes of each CPU are suppliers for downstream
161		 * interrupt controllers (such as PLIC, IMSIC and APLIC
162		 * direct-mode) so we should mark an INTC node as initialized
163		 * if we are not creating IRQ domain for it.
164		 */
165		fwnode_dev_initialized(of_fwnode_handle(node), true);
166		return 0;
167	}
168
169	return riscv_intc_init_common(of_node_to_fwnode(node));
 
 
 
 
 
 
170}
171
172IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
 
173
174#ifdef CONFIG_ACPI
175
176static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
177				       const unsigned long end)
178{
179	struct fwnode_handle *fn;
180	struct acpi_madt_rintc *rintc;
181
182	rintc = (struct acpi_madt_rintc *)header;
183
184	/*
185	 * The ACPI MADT will have one INTC for each CPU (or HART)
186	 * so riscv_intc_acpi_init() function will be called once
187	 * for each INTC. We only do INTC initialization
188	 * for the INTC belonging to the boot CPU (or boot HART).
189	 */
190	if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id())
191		return 0;
192
193	fn = irq_domain_alloc_named_fwnode("RISCV-INTC");
194	if (!fn) {
195		pr_err("unable to allocate INTC FW node\n");
196		return -ENOMEM;
197	}
198
199	return riscv_intc_init_common(fn);
200}
201
202IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
203		     ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init);
204#endif
/* ---- second copy: drivers/irqchip/irq-riscv-intc.c as of Linux v6.9.4 ---- */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 * Copyright (C) 2017-2018 SiFive
  5 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
  6 */
  7
  8#define pr_fmt(fmt) "riscv-intc: " fmt
  9#include <linux/acpi.h>
 10#include <linux/atomic.h>
 11#include <linux/bits.h>
 12#include <linux/cpu.h>
 13#include <linux/irq.h>
 14#include <linux/irqchip.h>
 15#include <linux/irqdomain.h>
 16#include <linux/interrupt.h>
 17#include <linux/module.h>
 18#include <linux/of.h>
 19#include <linux/smp.h>
 20#include <linux/soc/andes/irq.h>
 21
 22#include <asm/hwcap.h>
 23
/* Root IRQ domain for the per-hart local interrupts; created once on the boot hart. */
static struct irq_domain *intc_domain;
/* Number of standard local interrupt causes (raised to 64 when AIA is present). */
static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
/* First hwirq of the vendor-custom cause range (Andes); default = none usable. */
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
/* Number of vendor-custom causes starting at riscv_intc_custom_base. */
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
 29static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
 30{
 31	unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
 32
 33	if (generic_handle_domain_irq(intc_domain, cause))
 34		pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
 35}
 36
 37static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs)
 38{
 39	unsigned long topi;
 40
 41	while ((topi = csr_read(CSR_TOPI)))
 42		generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT);
 43}
 44
 45/*
 46 * On RISC-V systems local interrupts are masked or unmasked by writing
 47 * the SIE (Supervisor Interrupt Enable) CSR.  As CSRs can only be written
 48 * on the local hart, these functions can only be called on the hart that
 49 * corresponds to the IRQ chip.
 50 */
 51
/*
 * Disable a local interrupt.  With 64 causes on rv32 (AIA case), hwirqs at
 * or above BITS_PER_LONG live in the high-half IEH CSR; everything else is
 * controlled via the regular SIE CSR.
 */
static void riscv_intc_irq_mask(struct irq_data *d)
{
	if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
		csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
	else
		csr_clear(CSR_IE, BIT(d->hwirq));
}
 59
/*
 * Enable a local interrupt.  Mirrors riscv_intc_irq_mask(): hwirqs at or
 * above BITS_PER_LONG (rv32 with 64 causes) use the high-half IEH CSR.
 */
static void riscv_intc_irq_unmask(struct irq_data *d)
{
	if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
		csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
	else
		csr_set(CSR_IE, BIT(d->hwirq));
}
 67
 68static void andes_intc_irq_mask(struct irq_data *d)
 69{
 70	/*
 71	 * Andes specific S-mode local interrupt causes (hwirq)
 72	 * are defined as (256 + n) and controlled by n-th bit
 73	 * of SLIE.
 74	 */
 75	unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
 76
 77	if (d->hwirq < ANDES_SLI_CAUSE_BASE)
 78		csr_clear(CSR_IE, mask);
 79	else
 80		csr_clear(ANDES_CSR_SLIE, mask);
 81}
 82
 83static void andes_intc_irq_unmask(struct irq_data *d)
 84{
 85	unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
 86
 87	if (d->hwirq < ANDES_SLI_CAUSE_BASE)
 88		csr_set(CSR_IE, mask);
 89	else
 90		csr_set(ANDES_CSR_SLIE, mask);
 91}
 92
/* Intentionally empty EOI callback — see comment body for rationale. */
static void riscv_intc_irq_eoi(struct irq_data *d)
{
	/*
	 * The RISC-V INTC driver uses handle_percpu_devid_irq() flow
	 * for the per-HART local interrupts and child irqchip drivers
	 * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement
	 * chained handlers for the per-HART local interrupts.
	 *
	 * In the absence of irq_eoi(), the chained_irq_enter() and
	 * chained_irq_exit() functions (used by child irqchip drivers)
	 * will do unnecessary mask/unmask of per-HART local interrupts
	 * at the time of handling interrupts. To avoid this, we provide
	 * an empty irq_eoi() callback for RISC-V INTC irqchip.
	 */
}
108
/* irqchip ops for the standard RISC-V local interrupts (SIE/IEH-based). */
static struct irq_chip riscv_intc_chip = {
	.name = "RISC-V INTC",
	.irq_mask = riscv_intc_irq_mask,
	.irq_unmask = riscv_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
115
/* irqchip ops for Andes cores, which add vendor causes behind the SLIE CSR. */
static struct irq_chip andes_intc_chip = {
	.name		= "RISC-V INTC",
	.irq_mask	= andes_intc_irq_mask,
	.irq_unmask	= andes_intc_irq_unmask,
	.irq_eoi	= riscv_intc_irq_eoi,
};
122
123static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
124				 irq_hw_number_t hwirq)
125{
126	struct irq_chip *chip = d->host_data;
127
128	irq_set_percpu_devid(irq);
129	irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
130			    NULL, NULL);
131
132	return 0;
133}
134
135static int riscv_intc_domain_alloc(struct irq_domain *domain,
136				   unsigned int virq, unsigned int nr_irqs,
137				   void *arg)
138{
139	int i, ret;
140	irq_hw_number_t hwirq;
141	unsigned int type = IRQ_TYPE_NONE;
142	struct irq_fwspec *fwspec = arg;
143
144	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
145	if (ret)
146		return ret;
147
148	/*
149	 * Only allow hwirq for which we have corresponding standard or
150	 * custom interrupt enable register.
151	 */
152	if (hwirq >= riscv_intc_nr_irqs &&
153	    (hwirq < riscv_intc_custom_base ||
154	     hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
155		return -EINVAL;
156
157	for (i = 0; i < nr_irqs; i++) {
158		ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
159		if (ret)
160			return ret;
161	}
162
163	return 0;
164}
165
/* Domain ops: single-cell DT translation, per-hwirq mapping, range alloc. */
static const struct irq_domain_ops riscv_intc_domain_ops = {
	.map	= riscv_intc_domain_map,
	.xlate	= irq_domain_xlate_onecell,
	.alloc	= riscv_intc_domain_alloc
};
171
/* Accessor handed to the arch code so children can find the INTC fwnode. */
static struct fwnode_handle *riscv_intc_hwnode(void)
{
	return intc_domain->fwnode;
}
176
177static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip)
178{
179	int rc;
180
181	intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
 
182	if (!intc_domain) {
183		pr_err("unable to add IRQ domain\n");
184		return -ENXIO;
185	}
186
187	if (riscv_isa_extension_available(NULL, SxAIA)) {
188		riscv_intc_nr_irqs = 64;
189		rc = set_handle_irq(&riscv_intc_aia_irq);
190	} else {
191		rc = set_handle_irq(&riscv_intc_irq);
192	}
193	if (rc) {
194		pr_err("failed to set irq handler\n");
195		return rc;
196	}
197
198	riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
199
200	pr_info("%d local interrupts mapped%s\n",
201		riscv_intc_nr_irqs,
202		riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : "");
203	if (riscv_intc_custom_nr_irqs)
204		pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs);
205
206	return 0;
207}
208
209static int __init riscv_intc_init(struct device_node *node,
210				  struct device_node *parent)
211{
212	struct irq_chip *chip = &riscv_intc_chip;
213	unsigned long hartid;
214	int rc;
215
216	rc = riscv_of_parent_hartid(node, &hartid);
217	if (rc < 0) {
218		pr_warn("unable to find hart id for %pOF\n", node);
219		return 0;
220	}
221
222	/*
223	 * The DT will have one INTC DT node under each CPU (or HART)
224	 * DT node so riscv_intc_init() function will be called once
225	 * for each INTC DT node. We only need to do INTC initialization
226	 * for the INTC DT node belonging to boot CPU (or boot HART).
227	 */
228	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
229		/*
230		 * The INTC nodes of each CPU are suppliers for downstream
231		 * interrupt controllers (such as PLIC, IMSIC and APLIC
232		 * direct-mode) so we should mark an INTC node as initialized
233		 * if we are not creating IRQ domain for it.
234		 */
235		fwnode_dev_initialized(of_fwnode_handle(node), true);
236		return 0;
237	}
238
239	if (of_device_is_compatible(node, "andestech,cpu-intc")) {
240		riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
241		riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
242		chip = &andes_intc_chip;
243	}
244
245	return riscv_intc_init_common(of_node_to_fwnode(node), chip);
246}
247
248IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
249IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
250
251#ifdef CONFIG_ACPI
252
253static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
254				       const unsigned long end)
255{
256	struct fwnode_handle *fn;
257	struct acpi_madt_rintc *rintc;
258
259	rintc = (struct acpi_madt_rintc *)header;
260
261	/*
262	 * The ACPI MADT will have one INTC for each CPU (or HART)
263	 * so riscv_intc_acpi_init() function will be called once
264	 * for each INTC. We only do INTC initialization
265	 * for the INTC belonging to the boot CPU (or boot HART).
266	 */
267	if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id())
268		return 0;
269
270	fn = irq_domain_alloc_named_fwnode("RISCV-INTC");
271	if (!fn) {
272		pr_err("unable to allocate INTC FW node\n");
273		return -ENOMEM;
274	}
275
276	return riscv_intc_init_common(fn, &riscv_intc_chip);
277}
278
279IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
280		     ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init);
281#endif