drivers/dma-buf/udmabuf.c (v6.8)
// SPDX-License-Identifier: GPL-2.0
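/*
 * udmabuf: export memfd-backed (shmem) pages as a dma-buf, so userspace
 * can hand anonymous memory to devices without a driver-specific
 * allocator. A minimal userspace sketch (error handling omitted;
 * "size" is assumed to be a page-aligned value chosen by the caller):
 *
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, size);
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *	struct udmabuf_create create = {
 *		.memfd  = memfd, .flags = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,     .size  = size,
 *	};
 *	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */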
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

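/*
 * Fault handler for mmap'ed udmabufs: hand back the pinned shmem page
 * backing the faulting offset, taking an extra reference on it.
 */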
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

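/*
 * Kernel-space mapping: stitch the page array into one contiguous
 * virtual range with vm_map_ram(). The caller must hold the buffer's
 * reservation lock, hence the dma_resv_assert_held() checks.
 */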
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	void *vaddr;

	dma_resv_assert_held(buf->resv);

	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

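/*
 * Build a scatter-gather table over the pinned pages and map it for DMA
 * to @dev. Used both per-attachment (map_udmabuf) and for the lazily
 * created table cached in the udmabuf for CPU-access syncing.
 */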
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}

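/*
 * dma-buf release: drop the cached sg_table mapping (if CPU access ever
 * created one), then release every pinned page and the udmabuf itself.
 */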
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

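/*
 * CPU access bracketing: the first begin_cpu_access maps the buffer
 * against the udmabuf misc device and caches the table; later calls
 * only sync the cached scatterlist for the CPU, and end_cpu_access
 * hands ownership back to the device.
 */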
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf	   = map_udmabuf,
	.unmap_dma_buf	   = unmap_udmabuf,
	.release	   = release_udmabuf,
	.mmap		   = mmap_udmabuf,
	.vmap		   = vmap_udmabuf,
	.vunmap		   = vunmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access    = end_cpu_udmabuf,
};

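/*
 * The backing memfd must be sealed against shrinking, so the pinned
 * pages cannot be truncated away underneath the dma-buf, and must not
 * be write-sealed.
 */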
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

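/*
 * Core creation path: validate that every item is page-aligned and that
 * the total size stays under size_limit_mb, verify each memfd is a
 * sealed shmem file, pin its pages, then export the whole page array as
 * a dma-buf and return an fd for it.
 */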
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size   >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

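/* UDMABUF_CREATE: wrap the single-item ioctl as a one-entry list. */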
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

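/*
 * UDMABUF_CREATE_LIST: copy in the header, bound the item count by the
 * list_limit module parameter, then duplicate the item array from
 * userspace before handing it to udmabuf_create().
 */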
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");
drivers/dma-buf/udmabuf.c (v5.4)
// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>

static const u32    list_limit = 1024;  /* udmabuf_create_list->count limit */
static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */

struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
};

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;

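	/*
	 * Note: unlike the v6.8 handler above, there is no bounds check on
	 * vmf->pgoff here; the VM_FAULT_SIGBUS check was added in later
	 * kernels.
	 */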
	vmf->page = ubuf->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

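/*
 * In v5.4 each attachment builds and DMA-maps its own sg_table with
 * dma_map_sg(); the dma_map_sgtable() helper and the cached table used
 * by the v6.8 code above came later.
 */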
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	struct udmabuf *ubuf = at->dmabuf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
		ret = -EINVAL;
		goto err;
	}
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
	sg_free_table(sg);
	kfree(sg);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	pgoff_t pg;

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

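/*
 * Per-page kernel mapping hooks. The dma-buf .map/.unmap (kmap)
 * callbacks were removed from dma_buf_ops in later kernels, which is
 * why the v6.8 version above has no equivalent.
 */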
static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
{
	struct udmabuf *ubuf = buf->priv;
	struct page *page = ubuf->pages[page_num];

	return kmap(page);
}

static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
			   void *vaddr)
{
	kunmap(vaddr);
}

static const struct dma_buf_ops udmabuf_ops = {
	.map_dma_buf	  = map_udmabuf,
	.unmap_dma_buf	  = unmap_udmabuf,
	.release	  = release_udmabuf,
	.map		  = kmap_udmabuf,
	.unmap		  = kunmap_udmabuf,
	.mmap		  = mmap_udmabuf,
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

static long udmabuf_create(const struct udmabuf_create_list *head,
			   const struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}
	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		if (!shmem_mapping(file_inode(memfd)->i_mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size   >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(
				file_inode(memfd)->i_mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(&head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(&head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	return misc_register(&udmabuf_misc);
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");