v4.6
/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define DRV_NAME	"sunxi-nmi"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;
	u32 pend;
	u32 enable;
};

static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x08,
};

static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x34,
};

static struct sunxi_sc_nmi_reg_offs sun9i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x08,
	.enable	= 0x04,
};

static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(gc, val, off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}

static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virq = irq_find_mapping(domain, 0);

	chained_irq_enter(chip, desc);
	generic_handle_irq(virq);
	chained_irq_exit(chip, desc);
}

static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;
	unsigned int src_type;
	unsigned int i;

	irq_gc_lock(gc);

	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:
	case IRQ_TYPE_LEVEL_LOW:
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		irq_gc_unlock(gc);
		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
			data->irq);
		return -EBADR;
	}

	irqd_set_trigger_type(data, flow_type);
	irq_setup_alt_chip(data, flow_type);

	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	irq_gc_unlock(gc);

	return IRQ_SET_MASK_OK;
}

static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;


	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("Could not register interrupt domain.\n");
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("Could not allocate generic interrupt chip.\n");
		goto fail_irqd_remove;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("unable to parse irq\n");
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(gc->reg_base)) {
		pr_err("unable to map resource\n");
		ret = PTR_ERR(gc->reg_base);
		goto fail_irqd_remove;
	}

	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
	sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);

	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}

static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);

static int __init sun9i_nmi_irq_init(struct device_node *node,
				     struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
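The chip above sets IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED and routes trigger configuration through sunxi_sc_nmi_set_type(), so a client of this controller would normally request the single NMI line as a threaded interrupt. The following is a minimal, hypothetical consumer sketch, not part of this driver: the "demo" names, compatible string, and platform device are assumptions made only for illustration; the IRQ API calls are the standard kernel ones.

/* Hypothetical consumer of the sunxi NMI line (illustration only). */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static irqreturn_t demo_nmi_hard_handler(int irq, void *dev_id)
{
	/* Fast path: just wake the thread; real work happens there. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_nmi_thread_fn(int irq, void *dev_id)
{
	/* Slow path in process context; with IRQCHIP_EOI_THREADED the
	 * EOI is only issued once this function has completed. */
	return IRQ_HANDLED;
}

static int demo_nmi_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* IRQF_TRIGGER_LOW reaches sunxi_sc_nmi_set_type(), which programs
	 * SUNXI_SRC_TYPE_LEVEL_LOW into the ctrl register. */
	return devm_request_threaded_irq(&pdev->dev, irq,
					 demo_nmi_hard_handler,
					 demo_nmi_thread_fn,
					 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					 "demo-nmi", pdev);
}

static const struct of_device_id demo_nmi_of_match[] = {
	{ .compatible = "demo,nmi-consumer" },	/* hypothetical compatible */
	{ }
};
MODULE_DEVICE_TABLE(of, demo_nmi_of_match);

static struct platform_driver demo_nmi_driver = {
	.probe	= demo_nmi_probe,
	.driver	= {
		.name		= "demo-nmi-consumer",
		.of_match_table	= demo_nmi_of_match,
	},
};
module_platform_driver(demo_nmi_driver);

MODULE_LICENSE("GPL");

The v6.2 version of the same file follows; note that its chained handler calls generic_handle_domain_irq() directly instead of irq_find_mapping() plus generic_handle_irq().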
v6.2
/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define DRV_NAME	"sunxi-nmi"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

#define SUNXI_NMI_IRQ_BIT	BIT(0)

/*
 * For deprecated sun6i-a31-sc-nmi compatible.
 */
#define SUN6I_NMI_CTRL		0x00
#define SUN6I_NMI_PENDING	0x04
#define SUN6I_NMI_ENABLE	0x34

#define SUN7I_NMI_CTRL		0x00
#define SUN7I_NMI_PENDING	0x04
#define SUN7I_NMI_ENABLE	0x08

#define SUN9I_NMI_CTRL		0x00
#define SUN9I_NMI_ENABLE	0x04
#define SUN9I_NMI_PENDING	0x08

enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;
	u32 pend;
	u32 enable;
};

static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
	.ctrl	= SUN6I_NMI_CTRL,
	.pend	= SUN6I_NMI_PENDING,
	.enable	= SUN6I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
	.ctrl	= SUN7I_NMI_CTRL,
	.pend	= SUN7I_NMI_PENDING,
	.enable	= SUN7I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
	.ctrl	= SUN9I_NMI_CTRL,
	.pend	= SUN9I_NMI_PENDING,
	.enable	= SUN9I_NMI_ENABLE,
};

static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(gc, val, off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}

static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	generic_handle_domain_irq(domain, 0);
	chained_irq_exit(chip, desc);
}

static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;
	unsigned int src_type;
	unsigned int i;

	irq_gc_lock(gc);

	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:
	case IRQ_TYPE_LEVEL_LOW:
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		irq_gc_unlock(gc);
		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
			data->irq);
		return -EBADR;
	}

	irqd_set_trigger_type(data, flow_type);
	irq_setup_alt_chip(data, flow_type);

	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	irq_gc_unlock(gc);

	return IRQ_SET_MASK_OK;
}

static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					const struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;


	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("Could not register interrupt domain.\n");
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("Could not allocate generic interrupt chip.\n");
		goto fail_irqd_remove;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("unable to parse irq\n");
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(gc->reg_base)) {
		pr_err("unable to map resource\n");
		ret = PTR_ERR(gc->reg_base);
		goto fail_irqd_remove;
	}

	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	/* Disable any active interrupts */
	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);

	/* Clear any pending NMI interrupts */
	sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);

	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}

static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);

static int __init sun9i_nmi_irq_init(struct device_node *node,
				     struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
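As a worked illustration of the trigger encoding used in both versions above, the small standalone sketch below (plain user-space C; the helper name and the starting register value are illustrative only) mimics the read-modify-write that sunxi_sc_nmi_set_type() performs on the ctrl register: only the two bits covered by SUNXI_NMI_SRC_TYPE_MASK change, selecting one of the four SUNXI_SRC_TYPE_* values, while the rest of the register is preserved.

#include <stdio.h>

#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

/* Mirrors the read-modify-write in sunxi_sc_nmi_set_type(). */
static unsigned int encode_src_type(unsigned int ctrl, unsigned int src_type)
{
	ctrl &= ~SUNXI_NMI_SRC_TYPE_MASK;
	ctrl |= src_type;
	return ctrl;
}

int main(void)
{
	unsigned int ctrl = 0xdeadbee0;	/* arbitrary pre-existing value */

	printf("level low:    0x%08x\n", encode_src_type(ctrl, SUNXI_SRC_TYPE_LEVEL_LOW));
	printf("edge falling: 0x%08x\n", encode_src_type(ctrl, SUNXI_SRC_TYPE_EDGE_FALLING));
	printf("level high:   0x%08x\n", encode_src_type(ctrl, SUNXI_SRC_TYPE_LEVEL_HIGH));
	printf("edge rising:  0x%08x\n", encode_src_type(ctrl, SUNXI_SRC_TYPE_EDGE_RISING));
	return 0;
}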