Linux Audio

Check our new training course

Loading...
==== Version: Linux v3.15 ====
  1/*
  2 * Xtensa MX interrupt distributor
  3 *
  4 * Copyright (C) 2002 - 2013 Tensilica, Inc.
  5 *
  6 * This file is subject to the terms and conditions of the GNU General Public
  7 * License.  See the file "COPYING" in the main directory of this archive
  8 * for more details.
  9 */
 10
 11#include <linux/interrupt.h>
 12#include <linux/irqdomain.h>
 13#include <linux/irq.h>
 
 14#include <linux/of.h>
 15
 16#include <asm/mxregs.h>
 17
 18#include "irqchip.h"
 19
/* Number of low hwirq lines used as inter-processor interrupts (IPIs);
 * hwirqs below this get the per-CPU IPI handler in xtensa_mx_irq_map(). */
#define HW_IRQ_IPI_COUNT 2
/* First hwirq handled by the MX distributor; distributor registers
 * (MIENG/MIENGSET/MIROUT) are indexed by (irq - HW_IRQ_MX_BASE). */
#define HW_IRQ_MX_BASE 2
/* Offset added to DT interrupt specifiers marked "external" in
 * xtensa_mx_irq_domain_xlate(). */
#define HW_IRQ_EXTERN_BASE 3

/* Per-CPU software shadow of the INTENABLE special register, kept in
 * sync by the mask/unmask callbacks and secondary_init_irq(). */
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
 25
 26static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
 27		irq_hw_number_t hw)
 28{
 29	if (hw < HW_IRQ_IPI_COUNT) {
 30		struct irq_chip *irq_chip = d->host_data;
 31		irq_set_chip_and_handler_name(irq, irq_chip,
 32				handle_percpu_irq, "ipi");
 33		irq_set_status_flags(irq, IRQ_LEVEL);
 34		return 0;
 35	}
 
 36	return xtensa_irq_map(d, irq, hw);
 37}
 38
 39/*
 40 * Device Tree IRQ specifier translation function which works with one or
 41 * two cell bindings. First cell value maps directly to the hwirq number.
 42 * Second cell if present specifies whether hwirq number is external (1) or
 43 * internal (0).
 44 */
 45static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
 46		struct device_node *ctrlr,
 47		const u32 *intspec, unsigned int intsize,
 48		unsigned long *out_hwirq, unsigned int *out_type)
 49{
 50	return xtensa_irq_domain_xlate(intspec, intsize,
 51			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
 52			out_hwirq, out_type);
 53}
 54
/* irq_domain operations for the MX distributor: DT specifier translation
 * plus per-hwirq mapping (IPI vs. regular xtensa interrupt). */
static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
	.xlate = xtensa_mx_irq_domain_xlate,
	.map = xtensa_mx_irq_map,
};
 59
 60void secondary_init_irq(void)
 61{
 62	__this_cpu_write(cached_irq_mask,
 63			XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 64			XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
 65	set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 66			XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
 67}
 68
 69static void xtensa_mx_irq_mask(struct irq_data *d)
 70{
 71	unsigned int mask = 1u << d->hwirq;
 72
 73	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 74				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
 75		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
 76					HW_IRQ_MX_BASE), MIENG);
 77	} else {
 78		mask = __this_cpu_read(cached_irq_mask) & ~mask;
 79		__this_cpu_write(cached_irq_mask, mask);
 80		set_sr(mask, intenable);
 81	}
 
 
 
 82}
 83
 84static void xtensa_mx_irq_unmask(struct irq_data *d)
 85{
 86	unsigned int mask = 1u << d->hwirq;
 87
 88	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 89				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
 90		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
 91					HW_IRQ_MX_BASE), MIENGSET);
 92	} else {
 93		mask |= __this_cpu_read(cached_irq_mask);
 94		__this_cpu_write(cached_irq_mask, mask);
 95		set_sr(mask, intenable);
 96	}
 
 
 
 97}
 98
/* irq_chip ->irq_enable: run the board-variant hook first, then unmask.
 * NOTE(review): variant_irq_enable() semantics are defined elsewhere
 * (per-variant code) — assumed to prepare routing before unmask. */
static void xtensa_mx_irq_enable(struct irq_data *d)
{
	variant_irq_enable(d->hwirq);
	xtensa_mx_irq_unmask(d);
}
104
/* irq_chip ->irq_disable: mask first, then run the board-variant hook
 * (reverse order of xtensa_mx_irq_enable). */
static void xtensa_mx_irq_disable(struct irq_data *d)
{
	xtensa_mx_irq_mask(d);
	variant_irq_disable(d->hwirq);
}
110
111static void xtensa_mx_irq_ack(struct irq_data *d)
112{
113	set_sr(1 << d->hwirq, intclear);
114}
115
116static int xtensa_mx_irq_retrigger(struct irq_data *d)
117{
118	set_sr(1 << d->hwirq, intset);
 
 
 
 
119	return 1;
120}
121
122static int xtensa_mx_irq_set_affinity(struct irq_data *d,
123		const struct cpumask *dest, bool force)
124{
125	unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
 
126
127	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
 
 
128	return 0;
129
130}
131
/* irq_chip callbacks for the Xtensa MX interrupt distributor. */
static struct irq_chip xtensa_mx_irq_chip = {
	.name		= "xtensa-mx",
	.irq_enable	= xtensa_mx_irq_enable,
	.irq_disable	= xtensa_mx_irq_disable,
	.irq_mask	= xtensa_mx_irq_mask,
	.irq_unmask	= xtensa_mx_irq_unmask,
	.irq_ack	= xtensa_mx_irq_ack,
	.irq_retrigger	= xtensa_mx_irq_retrigger,
	.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
142
143int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
144{
145	struct irq_domain *root_domain =
146		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
147				&xtensa_mx_irq_domain_ops,
148				&xtensa_mx_irq_chip);
149	irq_set_default_host(root_domain);
150	secondary_init_irq();
151	return 0;
152}
153
/* DT init path for "cdns,xtensa-mx": register a linear irq domain for
 * the node, make it the default host, and enable interrupts on the
 * boot CPU. */
static int __init xtensa_mx_init(struct device_node *np,
		struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);
	irq_set_default_host(root_domain);
	secondary_init_irq();
	return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
==== Version: Linux v5.14.15 ====
  1/*
  2 * Xtensa MX interrupt distributor
  3 *
  4 * Copyright (C) 2002 - 2013 Tensilica, Inc.
  5 *
  6 * This file is subject to the terms and conditions of the GNU General Public
  7 * License.  See the file "COPYING" in the main directory of this archive
  8 * for more details.
  9 */
 10
 11#include <linux/interrupt.h>
 12#include <linux/irqdomain.h>
 13#include <linux/irq.h>
 14#include <linux/irqchip.h>
 15#include <linux/of.h>
 16
 17#include <asm/mxregs.h>
 18
 
 
/* Number of low hwirq lines used as inter-processor interrupts (IPIs);
 * hwirqs below this get the per-CPU IPI handler in xtensa_mx_irq_map(). */
#define HW_IRQ_IPI_COUNT 2
/* First hwirq handled by the MX distributor; distributor registers
 * (MIENG/MIENGSET/MIROUT) are indexed by (irq - HW_IRQ_MX_BASE). */
#define HW_IRQ_MX_BASE 2
/* Offset added to DT interrupt specifiers marked "external" in
 * xtensa_mx_irq_domain_xlate(). */
#define HW_IRQ_EXTERN_BASE 3

/* Per-CPU software shadow of the INTENABLE special register, kept in
 * sync by the mask/unmask callbacks and secondary_init_irq(). */
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
 24
 25static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
 26		irq_hw_number_t hw)
 27{
 28	if (hw < HW_IRQ_IPI_COUNT) {
 29		struct irq_chip *irq_chip = d->host_data;
 30		irq_set_chip_and_handler_name(irq, irq_chip,
 31				handle_percpu_irq, "ipi");
 32		irq_set_status_flags(irq, IRQ_LEVEL);
 33		return 0;
 34	}
 35	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
 36	return xtensa_irq_map(d, irq, hw);
 37}
 38
 39/*
 40 * Device Tree IRQ specifier translation function which works with one or
 41 * two cell bindings. First cell value maps directly to the hwirq number.
 42 * Second cell if present specifies whether hwirq number is external (1) or
 43 * internal (0).
 44 */
 45static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
 46		struct device_node *ctrlr,
 47		const u32 *intspec, unsigned int intsize,
 48		unsigned long *out_hwirq, unsigned int *out_type)
 49{
 50	return xtensa_irq_domain_xlate(intspec, intsize,
 51			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
 52			out_hwirq, out_type);
 53}
 54
/* irq_domain operations for the MX distributor: DT specifier translation
 * plus per-hwirq mapping (IPI vs. regular xtensa interrupt). */
static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
	.xlate = xtensa_mx_irq_domain_xlate,
	.map = xtensa_mx_irq_map,
};
 59
 60void secondary_init_irq(void)
 61{
 62	__this_cpu_write(cached_irq_mask,
 63			XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 64			XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
 65	xtensa_set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 66			XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
 67}
 68
/*
 * irq_chip ->irq_mask: disable delivery of the interrupt behind @d.
 * External interrupts routed through the MX distributor are masked via
 * MIENG; anything else falls through to the per-CPU INTENABLE shadow.
 * The ext_irq >= HW_IRQ_MX_BASE guard avoids a negative shift for
 * external irq numbers below the distributor's base.
 */
static void xtensa_mx_irq_mask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

		if (ext_irq >= HW_IRQ_MX_BASE) {
			/* Distributor-managed interrupt: clear its enable
			 * bit via MIENG. */
			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
			return;
		}
	}
	/* Core-managed interrupt: drop the bit from the cached mask and
	 * write it back to INTENABLE. */
	mask = __this_cpu_read(cached_irq_mask) & ~mask;
	__this_cpu_write(cached_irq_mask, mask);
	xtensa_set_sr(mask, intenable);
}
 86
/*
 * irq_chip ->irq_unmask: re-enable delivery of the interrupt behind @d.
 * Mirror of xtensa_mx_irq_mask(): distributor-routed external interrupts
 * are unmasked via MIENGSET, everything else via the per-CPU INTENABLE
 * shadow.
 */
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

		if (ext_irq >= HW_IRQ_MX_BASE) {
			/* Distributor-managed interrupt: set its enable
			 * bit via MIENGSET. */
			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
			return;
		}
	}
	/* Core-managed interrupt: add the bit to the cached mask and
	 * write it back to INTENABLE. */
	mask |= __this_cpu_read(cached_irq_mask);
	__this_cpu_write(cached_irq_mask, mask);
	xtensa_set_sr(mask, intenable);
}
104
/* irq_chip ->irq_enable: enabling is just unmasking for this chip. */
static void xtensa_mx_irq_enable(struct irq_data *d)
{
	xtensa_mx_irq_unmask(d);
}
109
/* irq_chip ->irq_disable: disabling is just masking for this chip. */
static void xtensa_mx_irq_disable(struct irq_data *d)
{
	xtensa_mx_irq_mask(d);
}
114
115static void xtensa_mx_irq_ack(struct irq_data *d)
116{
117	xtensa_set_sr(1 << d->hwirq, intclear);
118}
119
/*
 * irq_chip ->irq_retrigger: re-raise a pending interrupt in software.
 * Writing INTSET can only raise software interrupts, so warn and report
 * failure (0) for any other interrupt type; the core then falls back to
 * other retrigger means.
 */
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
		return 0;
	xtensa_set_sr(mask, intset);
	return 1;
}
129
130static int xtensa_mx_irq_set_affinity(struct irq_data *d,
131		const struct cpumask *dest, bool force)
132{
133	int cpu = cpumask_any_and(dest, cpu_online_mask);
134	unsigned mask = 1u << cpu;
135
136	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
137	irq_data_update_effective_affinity(d, cpumask_of(cpu));
138
139	return 0;
140
141}
142
/* irq_chip callbacks for the Xtensa MX interrupt distributor. */
static struct irq_chip xtensa_mx_irq_chip = {
	.name		= "xtensa-mx",
	.irq_enable	= xtensa_mx_irq_enable,
	.irq_disable	= xtensa_mx_irq_disable,
	.irq_mask	= xtensa_mx_irq_mask,
	.irq_unmask	= xtensa_mx_irq_unmask,
	.irq_ack	= xtensa_mx_irq_ack,
	.irq_retrigger	= xtensa_mx_irq_retrigger,
	.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
153
/*
 * Non-DT (legacy) init: register a legacy irq domain, make it the
 * default host, and enable interrupts on the boot CPU.
 * The domain maps NR_IRQS - 1 hwirqs starting at Linux IRQ 1 (hwirq
 * i -> irq i + 1) because Linux IRQ 0 is reserved/invalid.
 */
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
				&xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);
	irq_set_default_host(root_domain);
	secondary_init_irq();
	return 0;
}
164
/* DT init path for "cdns,xtensa-mx": register a linear irq domain for
 * the node, make it the default host, and enable interrupts on the
 * boot CPU. */
static int __init xtensa_mx_init(struct device_node *np,
		struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);
	irq_set_default_host(root_domain);
	secondary_init_irq();
	return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);