// SPDX-License-Identifier: GPL-2.0
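/*
 * udmabuf: export memfd-backed pages as a dma-buf, so that buffers
 * allocated in user space can be shared with kernel drivers and
 * devices without copying.
 */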
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
	pgoff_t pagecount;		/* number of backing pages */
	struct page **pages;		/* referenced pages from the memfds */
	struct sg_table *sg;		/* cached mapping for CPU access */
	struct miscdevice *device;	/* device the sg table is mapped for */
};

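/*
 * Fault handler for mmap'ed udmabufs: resolve the faulting offset to
 * its backing page and take an extra reference on it.
 */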
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

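/*
 * mmap of the dma-buf fd: only shared mappings make sense for a shared
 * buffer, so private mappings are rejected.
 */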
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

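/*
 * Kernel vmap support: map all backing pages into one contiguous kernel
 * virtual range. Both hooks assert that the caller holds the dma-buf
 * reservation lock.
 */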
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	void *vaddr;

	dma_resv_assert_held(buf->resv);

	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

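/*
 * Build a scatter/gather table covering all backing pages and map it
 * for DMA on the given device; put_sg_table() undoes both steps.
 */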
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	put_sg_table(at->dev, sg, direction);
}

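/*
 * dma-buf release: drop the cached CPU-access mapping, if any, then
 * release every page reference and free the bookkeeping.
 */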
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

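/*
 * CPU access bracketing: the device mapping is created lazily on the
 * first begin_cpu_access call, then reused for cache synchronization in
 * both directions.
 */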
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.vmap = vmap_udmabuf,
	.vunmap = vunmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};

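/*
 * Backing memfds must be sealed against shrinking, so the pages cannot
 * be truncated away while the dma-buf references them, and must not be
 * write-sealed, since devices may write to the buffer.
 */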
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

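/*
 * Core creation path: validate each request entry, take a reference on
 * every backing page of every memfd range, then export the collected
 * pages as a new dma-buf.
 */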
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/*
	 * All offsets and sizes must be page-aligned, and the total size
	 * is capped by the size_limit_mb module parameter.
	 */
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	/* Check seals and take a reference on every backing page. */
	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	/* Drop whatever page references were already taken. */
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

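/* UDMABUF_CREATE: the single-range variant, wrapped as a one-entry list. */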
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

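/*
 * UDMABUF_CREATE_LIST: copy in the header, then the item array (bounded
 * by the list_limit module parameter), and hand both to udmabuf_create().
 */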
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

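/*
 * ioctl dispatcher. The uapi structs use only fixed-width fields, so the
 * same handler serves native and compat callers.
 */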
static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

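/*
 * Register the /dev/udmabuf misc device. Its struct device is the one
 * used for the DMA mappings behind begin/end_cpu_access, hence the
 * 64-bit DMA mask.
 */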
static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");

// SPDX-License-Identifier: GPL-2.0
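/*
 * udmabuf, hugetlb-capable revision: same driver as above, but memfds
 * backed by hugetlbfs are accepted in addition to shmem ones, and the
 * vmap/vunmap hooks are not provided.
 */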
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

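/*
 * Unlike the revision above, the dma-buf core is expected to call mmap
 * with the reservation lock already held, which the handler asserts.
 */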
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	put_sg_table(at->dev, sg, direction);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

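/*
 * Creation path with hugetlb support: shmem memfds are read page by
 * page as before, while hugetlbfs memfds are resolved one huge page at
 * a time with a reference taken per PAGE_SIZE subpage.
 */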
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page, *hpage = NULL;
	pgoff_t subpgoff, maxsubpgs;
	struct hstate *hpstate;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		if (is_file_hugepages(memfd)) {
			/*
			 * For hugetlbfs, index by huge page and track the
			 * PAGE_SIZE-sized subpage within the current one.
			 */
			hpstate = hstate_file(memfd);
			pgoff = list[i].offset >> huge_page_shift(hpstate);
			subpgoff = (list[i].offset &
				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
		}
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			if (is_file_hugepages(memfd)) {
				if (!hpage) {
					hpage = find_get_page_flags(mapping, pgoff,
								    FGP_ACCESSED);
					if (!hpage) {
						ret = -EINVAL;
						goto err;
					}
				}
				page = hpage + subpgoff;
				get_page(page);
				subpgoff++;
				/* Move on to the next huge page when done. */
				if (subpgoff == maxsubpgs) {
					put_page(hpage);
					hpage = NULL;
					subpgoff = 0;
					pgoff++;
				}
			} else {
				page = shmem_read_mapping_page(mapping,
							       pgoff + pgidx);
				if (IS_ERR(page)) {
					ret = PTR_ERR(page);
					goto err;
				}
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
		if (hpage) {
			put_page(hpage);
			hpage = NULL;
		}
	}

	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	/* Drop whatever page references were already taken. */
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");