v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2016 MediaTek Inc.
  4 * Author: Youlin.Pei <youlin.pei@mediatek.com>
  5 */
  6
  7#include <linux/interrupt.h>
  8#include <linux/io.h>
  9#include <linux/irq.h>
 10#include <linux/irqchip.h>
 11#include <linux/irqdomain.h>
 12#include <linux/of.h>
 13#include <linux/of_irq.h>
 14#include <linux/of_address.h>
 15#include <linux/slab.h>
 16#include <linux/syscore_ops.h>
 17
 18#define CIRQ_ACK	0x40
 19#define CIRQ_MASK_SET	0xc0
 20#define CIRQ_MASK_CLR	0x100
 21#define CIRQ_SENS_SET	0x180
 22#define CIRQ_SENS_CLR	0x1c0
 23#define CIRQ_POL_SET	0x240
 24#define CIRQ_POL_CLR	0x280
 25#define CIRQ_CONTROL	0x300
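/*
 * Each offset above is the base of a bank of 32-bit registers with one bit
 * per external interrupt: mtk_cirq_write_mask() below selects word
 * (hwirq / 32) and bit (hwirq % 32).  Mask, sense and polarity each use a
 * SET/CLR register pair, so individual bits can be changed without a
 * read-modify-write of the whole word.
 */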
 26
 27#define CIRQ_EN	0x1
 28#define CIRQ_EDGE	0x2
 29#define CIRQ_FLUSH	0x4
 30
 31struct mtk_cirq_chip_data {
 32	void __iomem *base;
 33	unsigned int ext_irq_start;
 34	unsigned int ext_irq_end;
 35	struct irq_domain *domain;
 36};
 37
 38static struct mtk_cirq_chip_data *cirq_data;
 39
 40static void mtk_cirq_write_mask(struct irq_data *data, unsigned int offset)
 41{
 42	struct mtk_cirq_chip_data *chip_data = data->chip_data;
 43	unsigned int cirq_num = data->hwirq;
 44	u32 mask = 1 << (cirq_num % 32);
 45
 46	writel_relaxed(mask, chip_data->base + offset + (cirq_num / 32) * 4);
 47}
 48
 49static void mtk_cirq_mask(struct irq_data *data)
 50{
 51	mtk_cirq_write_mask(data, CIRQ_MASK_SET);
 52	irq_chip_mask_parent(data);
 53}
 54
 55static void mtk_cirq_unmask(struct irq_data *data)
 56{
 57	mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
 58	irq_chip_unmask_parent(data);
 59}
 60
 61static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
 62{
 63	int ret;
 64
 65	switch (type & IRQ_TYPE_SENSE_MASK) {
 66	case IRQ_TYPE_EDGE_FALLING:
 67		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
 68		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
 69		break;
 70	case IRQ_TYPE_EDGE_RISING:
 71		mtk_cirq_write_mask(data, CIRQ_POL_SET);
 72		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
 73		break;
 74	case IRQ_TYPE_LEVEL_LOW:
 75		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
 76		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
 77		break;
 78	case IRQ_TYPE_LEVEL_HIGH:
 79		mtk_cirq_write_mask(data, CIRQ_POL_SET);
 80		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
 81		break;
 82	default:
 83		break;
 84	}
 85
 86	data = data->parent_data;
 87	ret = data->chip->irq_set_type(data, type);
 88	return ret;
 89}
 90
 91static struct irq_chip mtk_cirq_chip = {
 92	.name			= "MT_CIRQ",
 93	.irq_mask		= mtk_cirq_mask,
 94	.irq_unmask		= mtk_cirq_unmask,
 95	.irq_eoi		= irq_chip_eoi_parent,
 96	.irq_set_type		= mtk_cirq_set_type,
 97	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 98#ifdef CONFIG_SMP
 99	.irq_set_affinity	= irq_chip_set_affinity_parent,
100#endif
101};
102
103static int mtk_cirq_domain_translate(struct irq_domain *d,
104				     struct irq_fwspec *fwspec,
105				     unsigned long *hwirq,
106				     unsigned int *type)
107{
108	if (is_of_node(fwspec->fwnode)) {
109		if (fwspec->param_count != 3)
110			return -EINVAL;
111
112		/* No PPI should point to this domain */
113		if (fwspec->param[0] != 0)
114			return -EINVAL;
115
 116		/* Check that the requested interrupt is in the CIRQ-supported range */
117		if (fwspec->param[1] < cirq_data->ext_irq_start ||
118		    fwspec->param[1] > cirq_data->ext_irq_end)
119			return -EINVAL;
120
121		*hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
122		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
123		return 0;
124	}
125
126	return -EINVAL;
127}
128
129static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
130				 unsigned int nr_irqs, void *arg)
131{
132	int ret;
133	irq_hw_number_t hwirq;
134	unsigned int type;
135	struct irq_fwspec *fwspec = arg;
136	struct irq_fwspec parent_fwspec = *fwspec;
137
138	ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
139	if (ret)
140		return ret;
141
142	if (WARN_ON(nr_irqs != 1))
143		return -EINVAL;
144
145	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
146				      &mtk_cirq_chip,
147				      domain->host_data);
148
149	parent_fwspec.fwnode = domain->parent->fwnode;
150	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
151					    &parent_fwspec);
152}
153
154static const struct irq_domain_ops cirq_domain_ops = {
155	.translate	= mtk_cirq_domain_translate,
156	.alloc		= mtk_cirq_domain_alloc,
157	.free		= irq_domain_free_irqs_common,
158};
159
160#ifdef CONFIG_PM_SLEEP
161static int mtk_cirq_suspend(void)
162{
163	u32 value, mask;
164	unsigned int irq, hwirq_num;
165	bool pending, masked;
166	int i, pendret, maskret;
167
168	/*
 169	 * When an external interrupt fires, CIRQ records its status even
 170	 * while CIRQ is disabled. When the flush command is executed, CIRQ
 171	 * resends signals according to that recorded status, so any stale
 172	 * status left uncleared here would cause spurious signals to be resent.
 173	 *
 174	 * arch_suspend_disable_irqs() is called before the CIRQ suspend
 175	 * callback. Simply clearing all the status would lose any external
 176	 * interrupt that fired between arch_suspend_disable_irqs() and the
 177	 * CIRQ suspend callback. Avoid this with the following steps:
 178	 *
 179	 * - Iterate over all interrupts supported by CIRQ;
 180	 * - For each interrupt, inspect its pending and masked status at
 181	 *   GIC level;
 182	 * - If it is pending and unmasked, it fired between
 183	 *   arch_suspend_disable_irqs() and the CIRQ suspend callback, so
 184	 *   don't ACK it. Otherwise, ACK it.
185	 */
186	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
187	for (i = 0; i < hwirq_num; i++) {
188		irq = irq_find_mapping(cirq_data->domain, i);
189		if (irq) {
190			pendret = irq_get_irqchip_state(irq,
191							IRQCHIP_STATE_PENDING,
192							&pending);
193
194			maskret = irq_get_irqchip_state(irq,
195							IRQCHIP_STATE_MASKED,
196							&masked);
197
198			if (pendret == 0 && maskret == 0 &&
199			    (pending && !masked))
200				continue;
201		}
202
203		mask = 1 << (i % 32);
204		writel_relaxed(mask, cirq_data->base + CIRQ_ACK + (i / 32) * 4);
205	}
206
 207	/* set edge_only mode, record edge-triggered interrupts */
208	/* enable cirq */
209	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
210	value |= (CIRQ_EDGE | CIRQ_EN);
211	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
212
213	return 0;
214}
215
216static void mtk_cirq_resume(void)
217{
218	u32 value;
219
 220	/* flush recorded interrupts; this will send signals to the parent controller */
221	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
222	writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
223
224	/* disable cirq */
225	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
226	value &= ~(CIRQ_EDGE | CIRQ_EN);
227	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
228}
229
230static struct syscore_ops mtk_cirq_syscore_ops = {
231	.suspend	= mtk_cirq_suspend,
232	.resume		= mtk_cirq_resume,
233};
234
235static void mtk_cirq_syscore_init(void)
236{
237	register_syscore_ops(&mtk_cirq_syscore_ops);
238}
239#else
240static inline void mtk_cirq_syscore_init(void) {}
241#endif
242
243static int __init mtk_cirq_of_init(struct device_node *node,
244				   struct device_node *parent)
245{
246	struct irq_domain *domain, *domain_parent;
247	unsigned int irq_num;
248	int ret;
249
250	domain_parent = irq_find_host(parent);
251	if (!domain_parent) {
252		pr_err("mtk_cirq: interrupt-parent not found\n");
253		return -EINVAL;
254	}
255
256	cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
257	if (!cirq_data)
258		return -ENOMEM;
259
260	cirq_data->base = of_iomap(node, 0);
261	if (!cirq_data->base) {
262		pr_err("mtk_cirq: unable to map cirq register\n");
263		ret = -ENXIO;
264		goto out_free;
265	}
266
267	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
268					 &cirq_data->ext_irq_start);
269	if (ret)
270		goto out_unmap;
271
272	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
273					 &cirq_data->ext_irq_end);
274	if (ret)
275		goto out_unmap;
276
277	irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
278	domain = irq_domain_add_hierarchy(domain_parent, 0,
279					  irq_num, node,
280					  &cirq_domain_ops, cirq_data);
281	if (!domain) {
282		ret = -ENOMEM;
283		goto out_unmap;
284	}
285	cirq_data->domain = domain;
286
287	mtk_cirq_syscore_init();
288
289	return 0;
290
291out_unmap:
292	iounmap(cirq_data->base);
293out_free:
294	kfree(cirq_data);
295	return ret;
296}
297
298IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);
v6.2
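Relative to v5.9, the v6.2 driver is functionally unchanged but parameterizes the register layout: the fixed #define offsets become per-SoC tables (mtk_cirq_regoffs_v1 and mtk_cirq_regoffs_v2, with an added CIRQ_STA index), register addresses are computed through the mtk_cirq_reg() and mtk_cirq_irq_reg() helpers, and the table to use is chosen at init time by matching the node against mtk_cirq_of_match.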
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2016 MediaTek Inc.
  4 * Author: Youlin.Pei <youlin.pei@mediatek.com>
  5 */
  6
  7#include <linux/interrupt.h>
  8#include <linux/io.h>
  9#include <linux/irq.h>
 10#include <linux/irqchip.h>
 11#include <linux/irqdomain.h>
 12#include <linux/of.h>
 13#include <linux/of_irq.h>
 14#include <linux/of_address.h>
 15#include <linux/slab.h>
 16#include <linux/syscore_ops.h>
 17
 18enum mtk_cirq_regoffs_index {
 19	CIRQ_STA,
 20	CIRQ_ACK,
 21	CIRQ_MASK_SET,
 22	CIRQ_MASK_CLR,
 23	CIRQ_SENS_SET,
 24	CIRQ_SENS_CLR,
 25	CIRQ_POL_SET,
 26	CIRQ_POL_CLR,
 27	CIRQ_CONTROL
 28};
 29
 30static const u32 mtk_cirq_regoffs_v1[] = {
 31	[CIRQ_STA]	= 0x0,
 32	[CIRQ_ACK]	= 0x40,
 33	[CIRQ_MASK_SET]	= 0xc0,
 34	[CIRQ_MASK_CLR]	= 0x100,
 35	[CIRQ_SENS_SET]	= 0x180,
 36	[CIRQ_SENS_CLR]	= 0x1c0,
 37	[CIRQ_POL_SET]	= 0x240,
 38	[CIRQ_POL_CLR]	= 0x280,
 39	[CIRQ_CONTROL]	= 0x300,
 40};
 41
 42static const u32 mtk_cirq_regoffs_v2[] = {
 43	[CIRQ_STA]	= 0x0,
 44	[CIRQ_ACK]	= 0x80,
 45	[CIRQ_MASK_SET]	= 0x180,
 46	[CIRQ_MASK_CLR]	= 0x200,
 47	[CIRQ_SENS_SET]	= 0x300,
 48	[CIRQ_SENS_CLR]	= 0x380,
 49	[CIRQ_POL_SET]	= 0x480,
 50	[CIRQ_POL_CLR]	= 0x500,
 51	[CIRQ_CONTROL]	= 0x600,
 52};
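/*
 * The v2 layout (selected for mt8192 in mtk_cirq_of_match below) has the
 * same register sequence at exactly twice the v1 offsets.
 */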
 53
 54#define CIRQ_EN	0x1
 55#define CIRQ_EDGE	0x2
 56#define CIRQ_FLUSH	0x4
 57
 58struct mtk_cirq_chip_data {
 59	void __iomem *base;
 60	unsigned int ext_irq_start;
 61	unsigned int ext_irq_end;
 62	const u32 *offsets;
 63	struct irq_domain *domain;
 64};
 65
 66static struct mtk_cirq_chip_data *cirq_data;
 67
 68static void __iomem *mtk_cirq_reg(struct mtk_cirq_chip_data *chip_data,
 69				  enum mtk_cirq_regoffs_index idx)
 70{
 71	return chip_data->base + chip_data->offsets[idx];
 72}
 73
 74static void __iomem *mtk_cirq_irq_reg(struct mtk_cirq_chip_data *chip_data,
 75				      enum mtk_cirq_regoffs_index idx,
 76				      unsigned int cirq_num)
 77{
 78	return mtk_cirq_reg(chip_data, idx) + (cirq_num / 32) * 4;
 79}
 80
 81static void mtk_cirq_write_mask(struct irq_data *data, enum mtk_cirq_regoffs_index idx)
 82{
 83	struct mtk_cirq_chip_data *chip_data = data->chip_data;
 84	unsigned int cirq_num = data->hwirq;
 85	u32 mask = 1 << (cirq_num % 32);
 86
 87	writel_relaxed(mask, mtk_cirq_irq_reg(chip_data, idx, cirq_num));
 88}
 89
 90static void mtk_cirq_mask(struct irq_data *data)
 91{
 92	mtk_cirq_write_mask(data, CIRQ_MASK_SET);
 93	irq_chip_mask_parent(data);
 94}
 95
 96static void mtk_cirq_unmask(struct irq_data *data)
 97{
 98	mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
 99	irq_chip_unmask_parent(data);
100}
101
102static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
103{
104	int ret;
105
106	switch (type & IRQ_TYPE_SENSE_MASK) {
107	case IRQ_TYPE_EDGE_FALLING:
108		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
109		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
110		break;
111	case IRQ_TYPE_EDGE_RISING:
112		mtk_cirq_write_mask(data, CIRQ_POL_SET);
113		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
114		break;
115	case IRQ_TYPE_LEVEL_LOW:
116		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
117		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
118		break;
119	case IRQ_TYPE_LEVEL_HIGH:
120		mtk_cirq_write_mask(data, CIRQ_POL_SET);
121		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
122		break;
123	default:
124		break;
125	}
126
127	data = data->parent_data;
128	ret = data->chip->irq_set_type(data, type);
129	return ret;
130}
131
132static struct irq_chip mtk_cirq_chip = {
133	.name			= "MT_CIRQ",
134	.irq_mask		= mtk_cirq_mask,
135	.irq_unmask		= mtk_cirq_unmask,
136	.irq_eoi		= irq_chip_eoi_parent,
137	.irq_set_type		= mtk_cirq_set_type,
138	.irq_retrigger		= irq_chip_retrigger_hierarchy,
139#ifdef CONFIG_SMP
140	.irq_set_affinity	= irq_chip_set_affinity_parent,
141#endif
142};
143
144static int mtk_cirq_domain_translate(struct irq_domain *d,
145				     struct irq_fwspec *fwspec,
146				     unsigned long *hwirq,
147				     unsigned int *type)
148{
149	if (is_of_node(fwspec->fwnode)) {
150		if (fwspec->param_count != 3)
151			return -EINVAL;
152
153		/* No PPI should point to this domain */
154		if (fwspec->param[0] != 0)
155			return -EINVAL;
156
 157		/* Check that the requested interrupt is in the CIRQ-supported range */
158		if (fwspec->param[1] < cirq_data->ext_irq_start ||
159		    fwspec->param[1] > cirq_data->ext_irq_end)
160			return -EINVAL;
161
162		*hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
163		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
164		return 0;
165	}
166
167	return -EINVAL;
168}
169
170static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
171				 unsigned int nr_irqs, void *arg)
172{
173	int ret;
174	irq_hw_number_t hwirq;
175	unsigned int type;
176	struct irq_fwspec *fwspec = arg;
177	struct irq_fwspec parent_fwspec = *fwspec;
178
179	ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
180	if (ret)
181		return ret;
182
183	if (WARN_ON(nr_irqs != 1))
184		return -EINVAL;
185
186	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
187				      &mtk_cirq_chip,
188				      domain->host_data);
189
190	parent_fwspec.fwnode = domain->parent->fwnode;
191	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
192					    &parent_fwspec);
193}
194
195static const struct irq_domain_ops cirq_domain_ops = {
196	.translate	= mtk_cirq_domain_translate,
197	.alloc		= mtk_cirq_domain_alloc,
198	.free		= irq_domain_free_irqs_common,
199};
200
201#ifdef CONFIG_PM_SLEEP
202static int mtk_cirq_suspend(void)
203{
204	void __iomem *reg;
205	u32 value, mask;
206	unsigned int irq, hwirq_num;
207	bool pending, masked;
208	int i, pendret, maskret;
209
210	/*
 211	 * When an external interrupt fires, CIRQ records its status even
 212	 * while CIRQ is disabled. When the flush command is executed, CIRQ
 213	 * resends signals according to that recorded status, so any stale
 214	 * status left uncleared here would cause spurious signals to be resent.
 215	 *
 216	 * arch_suspend_disable_irqs() is called before the CIRQ suspend
 217	 * callback. Simply clearing all the status would lose any external
 218	 * interrupt that fired between arch_suspend_disable_irqs() and the
 219	 * CIRQ suspend callback. Avoid this with the following steps:
 220	 *
 221	 * - Iterate over all interrupts supported by CIRQ;
 222	 * - For each interrupt, inspect its pending and masked status at
 223	 *   GIC level;
 224	 * - If it is pending and unmasked, it fired between
 225	 *   arch_suspend_disable_irqs() and the CIRQ suspend callback, so
 226	 *   don't ACK it. Otherwise, ACK it.
227	 */
228	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
229	for (i = 0; i < hwirq_num; i++) {
230		irq = irq_find_mapping(cirq_data->domain, i);
231		if (irq) {
232			pendret = irq_get_irqchip_state(irq,
233							IRQCHIP_STATE_PENDING,
234							&pending);
235
236			maskret = irq_get_irqchip_state(irq,
237							IRQCHIP_STATE_MASKED,
238							&masked);
239
240			if (pendret == 0 && maskret == 0 &&
241			    (pending && !masked))
242				continue;
243		}
244
245		reg = mtk_cirq_irq_reg(cirq_data, CIRQ_ACK, i);
246		mask = 1 << (i % 32);
247		writel_relaxed(mask, reg);
248	}
249
 250	/* set edge_only mode, record edge-triggered interrupts */
251	/* enable cirq */
252	reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
253	value = readl_relaxed(reg);
254	value |= (CIRQ_EDGE | CIRQ_EN);
255	writel_relaxed(value, reg);
256
257	return 0;
258}
259
260static void mtk_cirq_resume(void)
261{
262	void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
263	u32 value;
264
 265	/* flush recorded interrupts; this will send signals to the parent controller */
266	value = readl_relaxed(reg);
267	writel_relaxed(value | CIRQ_FLUSH, reg);
268
269	/* disable cirq */
270	value = readl_relaxed(reg);
271	value &= ~(CIRQ_EDGE | CIRQ_EN);
272	writel_relaxed(value, reg);
273}
274
275static struct syscore_ops mtk_cirq_syscore_ops = {
276	.suspend	= mtk_cirq_suspend,
277	.resume		= mtk_cirq_resume,
278};
279
280static void mtk_cirq_syscore_init(void)
281{
282	register_syscore_ops(&mtk_cirq_syscore_ops);
283}
284#else
285static inline void mtk_cirq_syscore_init(void) {}
286#endif
287
288static const struct of_device_id mtk_cirq_of_match[] = {
289	{ .compatible = "mediatek,mt2701-cirq", .data = &mtk_cirq_regoffs_v1 },
290	{ .compatible = "mediatek,mt8135-cirq", .data = &mtk_cirq_regoffs_v1 },
291	{ .compatible = "mediatek,mt8173-cirq", .data = &mtk_cirq_regoffs_v1 },
292	{ .compatible = "mediatek,mt8192-cirq", .data = &mtk_cirq_regoffs_v2 },
293	{ /* sentinel */ }
294};
295
296static int __init mtk_cirq_of_init(struct device_node *node,
297				   struct device_node *parent)
298{
299	struct irq_domain *domain, *domain_parent;
300	const struct of_device_id *match;
301	unsigned int irq_num;
302	int ret;
303
304	domain_parent = irq_find_host(parent);
305	if (!domain_parent) {
306		pr_err("mtk_cirq: interrupt-parent not found\n");
307		return -EINVAL;
308	}
309
310	cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
311	if (!cirq_data)
312		return -ENOMEM;
313
314	cirq_data->base = of_iomap(node, 0);
315	if (!cirq_data->base) {
316		pr_err("mtk_cirq: unable to map cirq register\n");
317		ret = -ENXIO;
318		goto out_free;
319	}
320
321	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
322					 &cirq_data->ext_irq_start);
323	if (ret)
324		goto out_unmap;
325
326	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
327					 &cirq_data->ext_irq_end);
328	if (ret)
329		goto out_unmap;
330
331	match = of_match_node(mtk_cirq_of_match, node);
332	if (!match) {
333		ret = -ENODEV;
334		goto out_unmap;
335	}
336	cirq_data->offsets = match->data;
337
338	irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
339	domain = irq_domain_add_hierarchy(domain_parent, 0,
340					  irq_num, node,
341					  &cirq_domain_ops, cirq_data);
342	if (!domain) {
343		ret = -ENOMEM;
344		goto out_unmap;
345	}
346	cirq_data->domain = domain;
347
348	mtk_cirq_syscore_init();
349
350	return 0;
351
352out_unmap:
353	iounmap(cirq_data->base);
354out_free:
355	kfree(cirq_data);
356	return ret;
357}
358
359IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);