v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

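/*
 * Register layout: each group of four 8-input combiners shares one
 * 0x10-byte register block, so COMBINER_ENABLE_SET/CLEAR and
 * COMBINER_INT_STATUS are 32-bit registers with one byte lane per
 * combiner in the group.
 */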
static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

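/*
 * Chained handler for a combiner's parent interrupt: read the shared
 * status register, keep only this combiner's byte lane, and forward the
 * lowest pending input into the combiner IRQ domain.
 */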
static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int combiner_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);

 out:
	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}

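/*
 * Per-combiner setup: hwirq_offset is the hardware IRQ number of the first
 * input of this combiner's four-combiner group (combiner_nr rounded down to
 * a multiple of four, times eight inputs), and irq_mask selects the
 * combiner's byte lane in the shared 32-bit registers. All inputs start
 * masked.
 */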
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

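/*
 * Device tree translation: the interrupt specifier carries two cells, the
 * combiner index and the input line within that combiner, which map to the
 * linear hwirq (index * IRQ_IN_COMBINER + input).
 */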
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

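/*
 * hw >> 3 recovers the combiner index from the linear hwirq (eight inputs
 * per combiner), so each mapped IRQ gets the chip data of its combiner.
 */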
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

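/*
 * Combiners are initialized in groups of four sharing a 0x10-byte register
 * block ((i >> 2) * 0x10 below); each combiner is then chained to its own
 * parent interrupt, taken in order from the device tree node.
 */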
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data)
		return;

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 *
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 *
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		writel_relaxed(combiner_data[i].irq_mask,
			     combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		writel_relaxed(combiner_data[i].pm_save,
			     combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

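/*
 * The number of combiners comes from the "samsung,combiner-nr" property;
 * when it is absent, the default of 20 (the initial value of max_nr) is
 * kept and reported.
 */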
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);
v4.10.11
 
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

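/*
 * Unlike the v6.8 handler above, this version first resolves the pending
 * input to its Linux IRQ number with irq_find_mapping() and then
 * dispatches it with generic_handle_irq().
 */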
static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}

static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warn("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 *
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 *
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		writel_relaxed(combiner_data[i].irq_mask,
			     combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		writel_relaxed(combiner_data[i].pm_save,
			     combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);