Linux Audio

Check our new training course

Loading...
v6.8
  1/*
  2 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
  3 *
  4 * Carlo Caione <carlo.caione@gmail.com>
  5 *
  6 * This file is licensed under the terms of the GNU General Public
  7 * License version 2.  This program is licensed "as is" without any
  8 * warranty of any kind, whether express or implied.
  9 */
 10
 11#define DRV_NAME	"sunxi-nmi"
 12#define pr_fmt(fmt)	DRV_NAME ": " fmt
 13
 14#include <linux/bitops.h>
 15#include <linux/device.h>
 16#include <linux/io.h>
 17#include <linux/irq.h>
 18#include <linux/interrupt.h>
 19#include <linux/irqdomain.h>
 20#include <linux/of_irq.h>
 21#include <linux/of_address.h>
 22#include <linux/irqchip.h>
 23#include <linux/irqchip/chained_irq.h>
 
 24
 25#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003
 26
 27#define SUNXI_NMI_IRQ_BIT	BIT(0)
 28
 29/*
 30 * For deprecated sun6i-a31-sc-nmi compatible.
 31 */
 32#define SUN6I_NMI_CTRL		0x00
 33#define SUN6I_NMI_PENDING	0x04
 34#define SUN6I_NMI_ENABLE	0x34
 35
 36#define SUN7I_NMI_CTRL		0x00
 37#define SUN7I_NMI_PENDING	0x04
 38#define SUN7I_NMI_ENABLE	0x08
 39
 40#define SUN9I_NMI_CTRL		0x00
 41#define SUN9I_NMI_ENABLE	0x04
 42#define SUN9I_NMI_PENDING	0x08
 43
/*
 * Trigger-mode encodings written to the SRC_TYPE field
 * (SUNXI_NMI_SRC_TYPE_MASK) of the control register.
 */
enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

/*
 * Per-SoC register layout: offsets of the control, pending and enable
 * registers from the mapped base address.
 */
struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;	/* trigger-type control register */
	u32 pend;	/* interrupt pending / ack register */
	u32 enable;	/* interrupt enable (mask) register */
};

/* A31 layout (deprecated sun6i-a31-sc-nmi compatible). */
static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
	.ctrl	= SUN6I_NMI_CTRL,
	.pend	= SUN6I_NMI_PENDING,
	.enable	= SUN6I_NMI_ENABLE,
};

/* A20 layout. */
static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
	.ctrl	= SUN7I_NMI_CTRL,
	.pend	= SUN7I_NMI_PENDING,
	.enable	= SUN7I_NMI_ENABLE,
};

/* A80 layout: enable and pending offsets are swapped vs. sun7i. */
static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
	.ctrl	= SUN9I_NMI_CTRL,
	.pend	= SUN9I_NMI_PENDING,
	.enable	= SUN9I_NMI_ENABLE,
};
 74
/* Write @val to register @off of the NMI block mapped at gc->reg_base. */
static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(gc, val, off);
}

/* Read register @off of the NMI block mapped at gc->reg_base. */
static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}
 85
/*
 * Chained handler for the parent interrupt: forward to hwirq 0 of our
 * single-interrupt domain, bracketed by chained_irq_enter/exit so the
 * parent irqchip is masked/acked correctly around the demux.
 */
static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	generic_handle_domain_irq(domain, 0);
	chained_irq_exit(chip, desc);
}
 95
/*
 * irq_set_type callback: program the NMI trigger mode.
 *
 * Translates the generic IRQ_TYPE_* flow type into the hardware
 * SUNXI_SRC_TYPE_* encoding and writes it into the SRC_TYPE field of the
 * control register.  Also switches between the level and edge
 * irq_chip_type instances (and their flow handlers) via
 * irq_setup_alt_chip().
 *
 * Returns IRQ_SET_MASK_OK on success, -EBADR for unsupported flow types.
 */
static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;
	unsigned int src_type;
	unsigned int i;

	irq_gc_lock(gc);

	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:		/* default to level-low when unspecified */
	case IRQ_TYPE_LEVEL_LOW:
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		irq_gc_unlock(gc);
		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
			data->irq);
		return -EBADR;
	}

	irqd_set_trigger_type(data, flow_type);
	irq_setup_alt_chip(data, flow_type);

	/* Pick the ctrl register offset from the chip type matching @flow_type. */
	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	/* Read-modify-write only the SRC_TYPE field. */
	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	irq_gc_unlock(gc);

	return IRQ_SET_MASK_OK;
}
144
/*
 * Shared init for all supported SoCs: create a one-interrupt linear
 * domain backed by a generic chip with two chip types — [0] level
 * (fasteoi flow) and [1] edge — using the per-SoC register offsets in
 * @reg_offs, then chain off the parent NMI interrupt.
 *
 * Returns 0 on success or a negative errno; the domain is removed on
 * failure.
 */
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					const struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;


	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("Could not register interrupt domain.\n");
		return -ENOMEM;
	}

	/* One generic chip covering the single hwirq, with two chip types. */
	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("Could not allocate generic interrupt chip.\n");
		goto fail_irqd_remove;
	}

	/* Parent interrupt this NMI controller demuxes from. */
	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("unable to parse irq\n");
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(gc->reg_base)) {
		pr_err("unable to map resource\n");
		ret = PTR_ERR(gc->reg_base);
		goto fail_irqd_remove;
	}

	/* Chip type 0: level triggers, fasteoi flow; EOI acks the pending bit. */
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

	/* Chip type 1: edge triggers, edge flow; ack on handler entry. */
	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	/* Disable any active interrupts */
	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);

	/* Clear any pending NMI interrupts */
	sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);

	/* Only now is it safe to receive the parent interrupt. */
	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}
220
/* A31 entry point (deprecated compatible string). */
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

/* A20 entry point. */
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);

/* A80 entry point. */
static int __init sun9i_nmi_irq_init(struct device_node *node,
				     struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
v3.15
  1/*
  2 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
  3 *
  4 * Carlo Caione <carlo.caione@gmail.com>
  5 *
  6 * This file is licensed under the terms of the GNU General Public
  7 * License version 2.  This program is licensed "as is" without any
  8 * warranty of any kind, whether express or implied.
  9 */
 10
 
 
 
 11#include <linux/bitops.h>
 12#include <linux/device.h>
 13#include <linux/io.h>
 14#include <linux/irq.h>
 15#include <linux/interrupt.h>
 16#include <linux/irqdomain.h>
 17#include <linux/of_irq.h>
 18#include <linux/of_address.h>
 19#include <linux/of_platform.h>
 20#include <linux/irqchip/chained_irq.h>
 21#include "irqchip.h"
 22
 23#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003
 24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Trigger-mode encodings written to the SRC_TYPE field
 * (SUNXI_NMI_SRC_TYPE_MASK) of the control register.
 */
enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

/*
 * Per-SoC register layout: offsets of the control, pending and enable
 * registers from the mapped base address.
 */
struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;	/* trigger-type control register */
	u32 pend;	/* interrupt pending / ack register */
	u32 enable;	/* interrupt enable (mask) register */
};

/* A20 register layout. */
static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x08,
};

/* A31 register layout. */
static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x34,
};
 49
/* Write @val to register @off of the NMI block mapped at gc->reg_base. */
static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(val, gc->reg_base + off);
}

/* Read register @off of the NMI block mapped at gc->reg_base. */
static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc->reg_base + off);
}
 60
/*
 * Chained handler for the parent interrupt: resolve hwirq 0 of our
 * single-interrupt domain to its virq and hand it to the core,
 * bracketed by chained_irq_enter/exit so the parent irqchip is
 * masked/acked correctly around the demux.
 */
static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int virq = irq_find_mapping(domain, 0);

	chained_irq_enter(chip, desc);
	generic_handle_irq(virq);
	chained_irq_exit(chip, desc);
}
 71
 72static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
 73{
 74	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
 75	struct irq_chip_type *ct = gc->chip_types;
 76	u32 src_type_reg;
 77	u32 ctrl_off = ct->regs.type;
 78	unsigned int src_type;
 79	unsigned int i;
 80
 81	irq_gc_lock(gc);
 82
 83	switch (flow_type & IRQF_TRIGGER_MASK) {
 84	case IRQ_TYPE_EDGE_FALLING:
 85		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
 86		break;
 87	case IRQ_TYPE_EDGE_RISING:
 88		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
 89		break;
 90	case IRQ_TYPE_LEVEL_HIGH:
 91		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
 92		break;
 93	case IRQ_TYPE_NONE:
 94	case IRQ_TYPE_LEVEL_LOW:
 95		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
 96		break;
 97	default:
 98		irq_gc_unlock(gc);
 99		pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
100			__func__, data->irq);
101		return -EBADR;
102	}
103
104	irqd_set_trigger_type(data, flow_type);
105	irq_setup_alt_chip(data, flow_type);
106
107	for (i = 0; i <= gc->num_ct; i++, ct++)
108		if (ct->type & flow_type)
109			ctrl_off = ct->regs.type;
110
111	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
112	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
113	src_type_reg |= src_type;
114	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
115
116	irq_gc_unlock(gc);
117
118	return IRQ_SET_MASK_OK;
119}
120
/*
 * Shared init for both supported SoCs: create a one-interrupt linear
 * domain backed by a generic chip with two chip types — [0] level
 * (fasteoi flow) and [1] edge — using the per-SoC register offsets in
 * @reg_offs, then chain off the parent NMI interrupt.
 *
 * Returns 0 on success or a negative errno; the domain is removed on
 * failure.
 */
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;


	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: Could not register interrupt domain.\n", node->name);
		return -ENOMEM;
	}

	/* One generic chip covering the single hwirq, with two chip types. */
	ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		 pr_err("%s: Could not allocate generic interrupt chip.\n",
			 node->name);
		 goto fail_irqd_remove;
	}

	/* Parent interrupt this NMI controller demuxes from. */
	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", node->name);
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_iomap(node, 0);
	if (!gc->reg_base) {
		pr_err("%s: unable to map resource\n", node->name);
		ret = -ENOMEM;
		goto fail_irqd_remove;
	}

	/* Chip type 0: level triggers, fasteoi flow; EOI acks the pending bit. */
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

	/* Chip type 1: edge triggers, edge flow; ack on handler entry. */
	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	/* Disable, then clear any pending NMI before chaining the handler. */
	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
	sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);

	irq_set_handler_data(irq, domain);
	irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}
195
/* A31 entry point. */
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

/* A20 entry point. */
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);