v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_pdrv_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Based on uio_pdrv.c by Uwe Kleine-Koenig,
 * Copyright (C) 2008 by Digi International Inc.
 * All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_pdrv_genirq"

struct uio_pdrv_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
};

/* Bits in uio_pdrv_genirq_platdata.flags */
enum {
	UIO_IRQ_DISABLED = 0,
};

static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_pdrv_genirq_platdata *priv = info->priv;

	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}

static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_pdrv_genirq_platdata *priv = info->priv;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);
	return 0;
}

static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_pdrv_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	spin_lock(&priv->lock);
	if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
		disable_irq_nosync(irq);
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks and concurrency
	 * with irq handler on SMP systems.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
			disable_irq_nosync(dev_info->irq);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static void uio_pdrv_genirq_cleanup(void *data)
{
	struct device *dev = data;

	pm_runtime_disable(dev);
}

static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
	struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
	struct device_node *node = pdev->dev.of_node;
	struct uio_pdrv_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (node) {
		const char *name;

		/* alloc uioinfo for one device */
		uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
				       GFP_KERNEL);
		if (!uioinfo) {
			dev_err(&pdev->dev, "unable to kmalloc\n");
			return -ENOMEM;
		}

		if (!of_property_read_string(node, "linux,uio-name", &name))
			uioinfo->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
		else
			uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
						       "%pOFn", node);

		uioinfo->version = "devicetree";
		/* Multiple IRQs are not supported */
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return ret;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		return ret;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "unable to kmalloc\n");
		return -ENOMEM;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;

	if (!uioinfo->irq) {
		ret = platform_get_irq_optional(pdev, 0);
		uioinfo->irq = ret;
		if (ret == -ENXIO)
			uioinfo->irq = UIO_IRQ_NONE;
		else if (ret == -EPROBE_DEFER)
			return ret;
		else if (ret < 0) {
			dev_err(&pdev->dev, "failed to get IRQ\n");
			return ret;
		}
	}

	if (uioinfo->irq) {
		struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

		/*
		 * If this is a level interrupt, don't do lazy disable.
		 * Otherwise the irq will fire again, since clearing of the
		 * actual cause, at device level, is done in userspace.
		 * irqd_is_level_type() isn't used because it isn't valid
		 * until the irq is configured.
		 */
		if (irq_data &&
		    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
			dev_dbg(&pdev->dev, "disable lazy unmask\n");
			irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
		}
	}

	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start & PAGE_MASK;
		uiomem->offs = r->start & ~PAGE_MASK;
		uiomem->size = (uiomem->offs + resource_size(r)
				+ PAGE_SIZE - 1) & PAGE_MASK;
		uiomem->name = r->name;
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_pdrv_genirq_handler;
	uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
	uioinfo->open = uio_pdrv_genirq_open;
	uioinfo->release = uio_pdrv_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = devm_add_action_or_reset(&pdev->dev, uio_pdrv_genirq_cleanup,
				       &pdev->dev);
	if (ret)
		return ret;

	ret = devm_uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret)
		dev_err(&pdev->dev, "unable to register uio device\n");

	return ret;
}

static int uio_pdrv_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, i.e. before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
	.runtime_suspend = uio_pdrv_genirq_runtime_nop,
	.runtime_resume = uio_pdrv_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static struct of_device_id uio_of_genirq_match[] = {
	{ /* This is filled with module_param */ },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
module_param_string(of_id, uio_of_genirq_match[0].compatible, 128, 0);
MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio");
#endif

static struct platform_driver uio_pdrv_genirq = {
	.probe = uio_pdrv_genirq_probe,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_pdrv_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_pdrv_genirq);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
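
The comments in probe() above spell out the contract with user space: the in-kernel handler only masks the interrupt, and user space must acknowledge the device and re-enable the line. Below is a minimal user-space sketch of that loop, under the assumption that the device registered by this driver shows up as /dev/uio0 and that its first memory region fits in one page. The character-device semantics used here (a blocking read() of a 32-bit event count, a write() of a 32-bit flag routed to irqcontrol(), mmap() of memory region N at offset N * page size) come from the UIO framework documentation, not from this file; the driver can also be bound to a device-tree node at module load time through the of_id parameter declared above.

/* Minimal user-space sketch for a device exposed by uio_pdrv_genirq.
 * Hypothetical assumptions: the device node is /dev/uio0 and its first
 * memory region is no larger than one page.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* open() reaches the driver's open() hook -> pm_runtime_get_sync() */
	int fd = open("/dev/uio0", O_RDWR);
	if (fd < 0)
		return 1;

	/* Map memory region 0; region N lives at offset N * page size. */
	void *regs = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (regs == MAP_FAILED) {
		close(fd);
		return 1;
	}

	for (;;) {
		uint32_t count;
		int32_t enable = 1;

		/* Block until the next interrupt; 'count' is the event count. */
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			break;

		/* Device-specific acknowledge of the interrupt cause would go
		 * here, typically through the registers mapped at 'regs'. */

		/* Re-enable the interrupt that the kernel handler left
		 * disabled; this write ends up in uio_pdrv_genirq_irqcontrol(). */
		if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
			break;

		printf("interrupt %u\n", (unsigned int)count);
	}

	munmap(regs, getpagesize());
	close(fd);	/* release() hook -> pm_runtime_put_sync() */
	return 0;
}
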
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_pdrv_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Based on uio_pdrv.c by Uwe Kleine-Koenig,
 * Copyright (C) 2008 by Digi International Inc.
 * All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_pdrv_genirq"

struct uio_pdrv_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
};

/* Bits in uio_pdrv_genirq_platdata.flags */
enum {
	UIO_IRQ_DISABLED = 0,
};

static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_pdrv_genirq_platdata *priv = info->priv;

	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}

static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_pdrv_genirq_platdata *priv = info->priv;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);
	return 0;
}

static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_pdrv_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	spin_lock(&priv->lock);
	if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
		disable_irq_nosync(irq);
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks and concurrency
	 * with irq handler on SMP systems.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
			disable_irq_nosync(dev_info->irq);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
	struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
	struct device_node *node = pdev->dev.of_node;
	struct uio_pdrv_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (node) {
		const char *name;

		/* alloc uioinfo for one device */
		uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
				       GFP_KERNEL);
		if (!uioinfo) {
			dev_err(&pdev->dev, "unable to kmalloc\n");
			return -ENOMEM;
		}

		if (!of_property_read_string(node, "linux,uio-name", &name))
			uioinfo->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
		else
			uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
						       "%pOFn", node);

		uioinfo->version = "devicetree";
		/* Multiple IRQs are not supported */
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return ret;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		return ret;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "unable to kmalloc\n");
		return -ENOMEM;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;

	if (!uioinfo->irq) {
		ret = platform_get_irq(pdev, 0);
		uioinfo->irq = ret;
		if (ret == -ENXIO && pdev->dev.of_node)
			uioinfo->irq = UIO_IRQ_NONE;
		else if (ret < 0) {
			dev_err(&pdev->dev, "failed to get IRQ\n");
			return ret;
		}
	}

	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		uiomem->name = r->name;
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_pdrv_genirq_handler;
	uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
	uioinfo->open = uio_pdrv_genirq_open;
	uioinfo->release = uio_pdrv_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int uio_pdrv_genirq_remove(struct platform_device *pdev)
{
	struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev);

	uio_unregister_device(priv->uioinfo);
	pm_runtime_disable(&pdev->dev);

	priv->uioinfo->handler = NULL;
	priv->uioinfo->irqcontrol = NULL;

	return 0;
}

static int uio_pdrv_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, i.e. before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
	.runtime_suspend = uio_pdrv_genirq_runtime_nop,
	.runtime_resume = uio_pdrv_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static struct of_device_id uio_of_genirq_match[] = {
	{ /* This is filled with module_param */ },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
module_param_string(of_id, uio_of_genirq_match[0].compatible, 128, 0);
MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio");
#endif

static struct platform_driver uio_pdrv_genirq = {
	.probe = uio_pdrv_genirq_probe,
	.remove = uio_pdrv_genirq_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_pdrv_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_pdrv_genirq);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
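
One visible difference between the two listings is how probe() fills in each I/O memory region: the v5.4 code stores the raw resource start and size, while the v5.9 code rounds addr down to a page boundary, records the remainder in offs, and rounds size up to whole pages, so that regions which do not start on a page boundary can still be mmap()ed. A small stand-alone sketch of that arithmetic follows; the resource values are hypothetical and PAGE_SIZE/PAGE_MASK simply mirror the kernel macros for a 4 KiB page.

/* Stand-alone illustration of the v5.9 alignment of a UIO memory region. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x40001100UL;	/* hypothetical resource start, not page aligned */
	unsigned long len = 0x300UL;		/* hypothetical resource size */

	unsigned long addr = start & PAGE_MASK;		/* 0x40001000 */
	unsigned long offs = start & ~PAGE_MASK;	/* 0x100 */
	unsigned long size = (offs + len + PAGE_SIZE - 1) & PAGE_MASK;	/* 0x1000 */

	/* User space mmap()s 'size' bytes of the mapping and adds 'offs'
	 * to reach the registers, so the hardware region itself does not
	 * have to be page aligned. */
	printf("addr=%#lx offs=%#lx size=%#lx\n", addr, offs, size);
	return 0;
}
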