// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_dmem_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2012 Damian Hobson-Garcia
 *
 * Based on uio_pdrv_genirq.c by Magnus Damm
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_dmem_genirq"
#define DMEM_MAP_ERROR (~0)

struct uio_dmem_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
	unsigned int dmem_region_start;
	unsigned int num_dmem_regions;
	void *dmem_region_vaddr[MAX_UIO_MAPS];
	struct mutex alloc_lock;
	unsigned int refcnt;
};

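/*
 * Usage sketch (not part of this file): a board file might register this
 * driver with one fixed MMIO window and two dynamically allocated DMA
 * regions roughly as below. All names, addresses and sizes here are
 * hypothetical; only the DRIVER_NAME device name and the pdata layout come
 * from this driver and its platform_data header.
 *
 *	static struct resource mydev_resources[] = {
 *		DEFINE_RES_MEM(0x10000000, 0x1000),	// fixed register window
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static unsigned int mydev_region_sizes[] = { SZ_1M, SZ_4M };
 *
 *	static struct uio_dmem_genirq_pdata mydev_pdata = {
 *		.uioinfo = {
 *			.name = "mydev",
 *			.version = "0",
 *		},
 *		.dynamic_region_sizes = mydev_region_sizes,
 *		.num_dynamic_regions = ARRAY_SIZE(mydev_region_sizes),
 *	};
 *
 *	platform_device_register_resndata(NULL, DRIVER_NAME, -1,
 *			mydev_resources, ARRAY_SIZE(mydev_resources),
 *			&mydev_pdata, sizeof(mydev_pdata));
 */
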
static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		void *addr;

		if (!uiomem->size)
			break;

		addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
					  (dma_addr_t *)&uiomem->addr,
					  GFP_KERNEL);
		if (!addr)
			uiomem->addr = DMEM_MAP_ERROR;
		priv->dmem_region_vaddr[dmem_region++] = addr;
		++uiomem;
	}
	priv->refcnt++;

	mutex_unlock(&priv->alloc_lock);
	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}

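/*
 * Usage sketch (userspace, not part of this file): UIO exposes each
 * uio_mem entry as map N, mmap()ed at offset N * page_size, so the DMA
 * regions allocated in open() above are mapped like any fixed region.
 * The device path and map index are hypothetical; the DMA address of a
 * dynamic region can be read back from
 * /sys/class/uio/uioX/maps/mapN/addr after open().
 *
 *	int fd = open("/dev/uio0", O_RDWR);	// triggers the allocation above
 *	long psz = sysconf(_SC_PAGESIZE);
 *	// map 1: e.g. the first dynamic region behind one fixed region
 *	void *buf = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 1 * psz);
 */
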
static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);

	priv->refcnt--;
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		if (!uiomem->size)
			break;
		if (priv->dmem_region_vaddr[dmem_region]) {
			dma_free_coherent(&priv->pdev->dev, uiomem->size,
					  priv->dmem_region_vaddr[dmem_region],
					  uiomem->addr);
		}
		uiomem->addr = DMEM_MAP_ERROR;
		++dmem_region;
		++uiomem;
	}

	mutex_unlock(&priv->alloc_lock);
	return 0;
}

static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	if (!test_and_set_bit(0, &priv->flags))
		disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (test_and_clear_bit(0, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!test_and_set_bit(0, &priv->flags)) {
			/* nosync variant is safe to call under the lock */
			disable_irq_nosync(dev_info->irq);
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

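/*
 * Usage sketch (userspace, not part of this file): the standard UIO
 * read()/write() protocol that pairs with the two callbacks above.
 * read() blocks until uio_dmem_genirq_handler() has run (and masked the
 * irq); writing 1 unmasks it again through uio_dmem_genirq_irqcontrol().
 *
 *	uint32_t info;
 *	while (read(fd, &info, sizeof(info)) == sizeof(info)) {
 *		// ... clear the interrupt cause in device registers ...
 *		info = 1;
 *		write(fd, &info, sizeof(info));	// unmask the irq
 *	}
 */
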
static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
	struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct uio_info *uioinfo = &pdata->uioinfo;
	struct uio_dmem_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		/* alloc uioinfo for one device */
		uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
		if (!uioinfo) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "unable to kmalloc\n");
			goto bad2;
		}
		uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
					       pdev->dev.of_node);
		uioinfo->version = "devicetree";
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		goto bad0;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		goto bad0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "unable to kmalloc\n");
		goto bad0;
	}

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;
	mutex_init(&priv->alloc_lock);

	if (!uioinfo->irq) {
		/* Multiple IRQs are not supported */
		ret = platform_get_irq(pdev, 0);
		if (ret == -ENXIO && pdev->dev.of_node)
			ret = UIO_IRQ_NONE;
		else if (ret < 0)
			goto bad1;
		uioinfo->irq = ret;
	}

	if (uioinfo->irq) {
		struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

		/*
		 * For level-triggered interrupts, don't do a lazy disable.
		 * Otherwise the irq will fire again immediately, since the
		 * actual cause is only cleared at device level, by userspace.
		 * irqd_is_level_type() isn't used here since it isn't valid
		 * until the irq has been configured.
		 */
		if (irq_data &&
		    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
			dev_dbg(&pdev->dev, "disable lazy unmask\n");
			irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
		}
	}

	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		++uiomem;
	}

	priv->dmem_region_start = uiomem - &uioinfo->mem[0];
	priv->num_dmem_regions = pdata->num_dynamic_regions;

	for (i = 0; i < pdata->num_dynamic_regions; ++i) {
		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" dynamic and fixed memory regions.\n");
			break;
		}
		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = DMEM_MAP_ERROR;
		uiomem->size = pdata->dynamic_region_sizes[i];
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_dmem_genirq_handler;
	uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
	uioinfo->open = uio_dmem_genirq_open;
	uioinfo->release = uio_dmem_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		pm_runtime_disable(&pdev->dev);
		goto bad1;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
 bad1:
	kfree(priv);
 bad0:
	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(uioinfo);
 bad2:
	return ret;
}

static int uio_dmem_genirq_remove(struct platform_device *pdev)
{
	struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev);

	uio_unregister_device(priv->uioinfo);
	pm_runtime_disable(&pdev->dev);

	priv->uioinfo->handler = NULL;
	priv->uioinfo->irqcontrol = NULL;

	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(priv->uioinfo);

	kfree(priv);
	return 0;
}

static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, i.e. before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
	.runtime_suspend = uio_dmem_genirq_runtime_nop,
	.runtime_resume = uio_dmem_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
	{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif

static struct platform_driver uio_dmem_genirq = {
	.probe = uio_dmem_genirq_probe,
	.remove = uio_dmem_genirq_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_dmem_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_dmem_genirq);

MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);