/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service.  This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */
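
/*
 * Example (illustrative sketch, not a complete program): roughly how
 * userspace drives this driver using only the generic DRM dumb-buffer and
 * PRIME ioctls wired up below. It assumes the vgem node appears as
 * /dev/dri/card0 and omits all error handling.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/drm.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *
 *	// Allocate a shmem-backed buffer via the dumb-buffer interface.
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	// Map it into the CPU's address space through the fake mmap offset.
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *
 *	// Export it as a dma-buf fd that another driver can import.
 *	struct drm_prime_handle prime = {
 *		.handle = create.handle, .flags = DRM_CLOEXEC,
 *	};
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 */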

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}

static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;
	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;

	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}

	}
	return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	drm_gem_object_put_unlocked(&obj->base);
	if (ret)
		return ERR_PTR(ret);

	return &obj->base;
}

static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	DRM_DEBUG("Created object of size %lld\n", size);

	return 0;
}

static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

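/*
 * vgem-specific ioctls, implemented in vgem_fence.c: userspace can attach a
 * fence to a buffer and signal it later, which lets tests exercise
 * implicit-fencing paths without real hardware.
 */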
static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};

static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mapping set by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct file_operations vgem_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= vgem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.release	= drm_release,
};

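/*
 * Pin the backing shmem pages. The pin count is shared by prime exports and
 * kernel vmaps, so the page array is only fetched on the first pin and only
 * released when the last user unpins.
 */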
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

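/*
 * Import path: wrap a foreign dma-buf in a vgem object. The page array is
 * built from the caller-provided sg_table and stays pinned for the lifetime
 * of the object.
 */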
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					npages);
	return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}

static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}

static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}

static struct drm_driver vgem_driver = {
	.driver_features		= DRIVER_GEM | DRIVER_RENDER,
	.release			= vgem_release,
	.open				= vgem_open,
	.postclose			= vgem_postclose,
	.gem_free_object_unlocked	= vgem_gem_free_object,
	.gem_vm_ops			= &vgem_gem_vm_ops,
	.ioctls				= vgem_ioctls,
	.num_ioctls			= ARRAY_SIZE(vgem_ioctls),
	.fops				= &vgem_driver_fops,

	.dumb_create			= vgem_gem_dumb_create,
	.dumb_map_offset		= vgem_gem_dumb_map,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_unpin = vgem_prime_unpin,
	.gem_prime_import = vgem_prime_import,
	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name	= DRIVER_NAME,
	.desc	= DRIVER_DESC,
	.date	= DRIVER_DATE,
	.major	= DRIVER_MAJOR,
	.minor	= DRIVER_MINOR,
};

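/*
 * vgem has no hardware, so it registers a dummy platform device purely to
 * obtain a struct device that dma-buf import/export can use for DMA mapping.
 */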
static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_free;
	}

	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
				     DMA_BIT_MASK(64));
	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
			   &vgem_device->platform->dev);
	if (ret)
		goto out_unregister;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_fini;

	return 0;

out_fini:
	drm_dev_fini(&vgem_device->drm);
out_unregister:
	platform_device_unregister(vgem_device->platform);
out_free:
	kfree(vgem_device);
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_put(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");