drivers/gpu/drm/drm_gem.c (v3.1)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
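/*
 * Worked example (an editor's note, not part of the original source):
 * assuming PAGE_SHIFT == 12 on a 64-bit build,
 *   DRM_FILE_PAGE_OFFSET_START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages,
 * i.e. fake offsets begin at the 4 GiB byte mark, safely above any
 * legitimate pgoff into a 4 GiB real mapping, and
 *   DRM_FILE_PAGE_OFFSET_SIZE = (0xFFFFFFFF >> 12) * 16 ~= 16M pages,
 * roughly 64 GiB of fake-offset address space for the offset manager
 * to hand out.
 */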

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return -ENOMEM;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
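The flink/open pair above is easiest to see from userspace. Below is a minimal sketch (an editor's addition, not part of the kernel source): it assumes the uapi definitions from <drm/drm.h>, two open DRM file descriptors fd_a and fd_b, and an existing GEM handle on fd_a obtained through a driver-specific creation ioctl. DRM_IOCTL_GEM_FLINK reaches drm_gem_flink_ioctl() to publish a global name, and DRM_IOCTL_GEM_OPEN reaches drm_gem_open_ioctl() to turn that name into a new, reference-holding handle.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Share one GEM object between two DRM fds via flink/open. */
static int share_gem_object(int fd_a, int fd_b, uint32_t handle_a,
			    uint32_t *handle_b, uint64_t *size)
{
	struct drm_gem_flink flink;
	struct drm_gem_open open_arg;

	memset(&flink, 0, sizeof(flink));
	flink.handle = handle_a;
	/* Publish a global name; note the name does not hold a reference. */
	if (ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink) < 0)
		return -1;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = flink.name;
	/* Resolve the name into a new handle; this one does hold a
	 * reference, so the object stays alive for fd_b. */
	if (ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg) < 0)
		return -1;

	*handle_b = open_arg.handle;
	*size = open_arg.size;
	return 0;
}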
drivers/gpu/drm/drm_gem.c (v3.5.6)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
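/*
 * Editor's note (not in the original source): a driver typically calls
 * drm_gem_create_mmap_offset() from its own mmap-offset ioctl and hands
 * the resulting byte offset back to userspace, along the lines of
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
 *
 * where `args` is that driver's hypothetical ioctl argument struct.
 * Userspace then passes the offset straight to mmap(2) on the DRM fd,
 * which lands in drm_gem_mmap() below.
 */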

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
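To close the loop, here is how the fake-offset machinery looks from userspace (again an editor's sketch, not part of the file): a driver-specific ioctl returns the byte offset created by drm_gem_create_mmap_offset(), and a plain mmap(2) on the DRM fd with that offset routes through drm_gem_mmap() above, which installs the driver's gem_vm_ops fault handler.

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* Map a GEM object given the fake offset obtained from a
 * driver-specific ioctl (hypothetical `fake_offset` here). */
static void *map_gem_object(int drm_fd, uint64_t fake_offset, size_t size)
{
	/* The kernel sees fake_offset >> PAGE_SHIFT as vma->vm_pgoff and
	 * looks it up in the offset hash to find the object; page faults
	 * are then serviced by the driver's gem_vm_ops. */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 drm_fd, (off_t)fake_offset);

	return ptr == MAP_FAILED ? NULL : ptr;
}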