v3.1
  1/*
  2 * Copyright © 2008 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *    Eric Anholt <eric@anholt.net>
 25 *
 26 */
 27
 28#include <linux/types.h>
 29#include <linux/slab.h>
 30#include <linux/mm.h>
 31#include <linux/uaccess.h>
 32#include <linux/fs.h>
 33#include <linux/file.h>
 34#include <linux/module.h>
 35#include <linux/mman.h>
 36#include <linux/pagemap.h>
 37#include <linux/shmem_fs.h>
 38#include "drmP.h"
 39
 40/** @file drm_gem.c
 41 *
 42 * This file provides some of the base ioctls and library routines for
 43 * the graphics memory manager implemented by each device driver.
 44 *
 45 * Because various devices have different requirements in terms of
 46 * synchronization and migration strategies, implementing that is left up to
 47 * the driver, and all that the general API provides should be generic --
 48 * allocating objects, reading/writing data with the cpu, freeing objects.
 49 * Even there, platform-dependent optimizations for reading/writing data with
 50 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 51 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 52 *
 53 * The goal was to have swap-backed object allocation managed through
 54 * struct file.  However, file descriptors as handles to a struct file have
 55 * two major failings:
 56 * - Process limits prevent more than 1024 or so being used at a time by
 57 *   default.
 58 * - Inability to allocate high fds will aggravate the X Server's select()
 59 *   handling, and likely that of many GL client applications as well.
 60 *
 61 * This led to a plan of using our own integer IDs (called handles, following
 62 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 63 * ioctls.  The objects themselves will still include the struct file so
 64 * that we can transition to fds if the required kernel infrastructure shows
 65 * up at a later date, and as our interface with shmfs for memory allocation.
 66 */
 67
 68/*
 69 * We make up offsets for buffer objects so we can recognize them at
 70 * mmap time.
 71 */
 72
 73/* pgoff in mmap is an unsigned long, so we need to make sure that
 74 * the faked up offset will fit
 75 */
 76
 77#if BITS_PER_LONG == 64
 78#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
 79#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
 80#else
 81#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
 82#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
 83#endif
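
For example, assuming 4 KiB pages (PAGE_SHIFT == 12) on a 64-bit build: DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 = 0x100000 page offsets, so fake offsets begin at the 4 GiB byte boundary, just past anything a 32-bit file range could legitimately produce, and DRM_FILE_PAGE_OFFSET_SIZE is (0xFFFFFFFF >> 12) * 16, roughly 16M page offsets or about 64 GiB of mmap offset space to hand out to buffer objects.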
 84
 85/**
 86 * Initialize the GEM device fields
 87 */
 88
 89int
 90drm_gem_init(struct drm_device *dev)
 91{
 92	struct drm_gem_mm *mm;
 93
 94	spin_lock_init(&dev->object_name_lock);
 95	idr_init(&dev->object_name_idr);
 96
 97	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
 98	if (!mm) {
 99		DRM_ERROR("out of memory\n");
100		return -ENOMEM;
101	}
102
103	dev->mm_private = mm;
104
105	if (drm_ht_create(&mm->offset_hash, 12)) {
106		kfree(mm);
107		return -ENOMEM;
108	}
109
110	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
111			DRM_FILE_PAGE_OFFSET_SIZE)) {
112		drm_ht_remove(&mm->offset_hash);
113		kfree(mm);
114		return -ENOMEM;
115	}
116
117	return 0;
118}
119
120void
121drm_gem_destroy(struct drm_device *dev)
122{
123	struct drm_gem_mm *mm = dev->mm_private;
124
125	drm_mm_takedown(&mm->offset_manager);
126	drm_ht_remove(&mm->offset_hash);
127	kfree(mm);
128	dev->mm_private = NULL;
129}
130
131/**
132 * Initialize an already allocated GEM object of the specified size with
133 * shmfs backing store.
134 */
135int drm_gem_object_init(struct drm_device *dev,
136			struct drm_gem_object *obj, size_t size)
137{
138	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
139
140	obj->dev = dev;
141	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
142	if (IS_ERR(obj->filp))
143		return -ENOMEM;
144
145	kref_init(&obj->refcount);
146	atomic_set(&obj->handle_count, 0);
147	obj->size = size;
148
149	return 0;
150}
151EXPORT_SYMBOL(drm_gem_object_init);
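
As a usage sketch (my_obj and my_gem_create are hypothetical names, not part of this file), a driver typically embeds struct drm_gem_object in its own buffer type and initializes the embedded member:

struct my_obj {
	struct drm_gem_object base;
	/* driver-private bookkeeping would follow */
};

static struct my_obj *my_gem_create(struct drm_device *dev, size_t size)
{
	struct my_obj *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* The size must be page-aligned; see the BUG_ON above. */
	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	return bo;
}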
152
153/**
154 * Initialize an already allocated GEM object of the specified size with
155 * no GEM provided backing store. Instead the caller is responsible for
156 * backing the object and handling it.
157 */
158int drm_gem_private_object_init(struct drm_device *dev,
159			struct drm_gem_object *obj, size_t size)
160{
161	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
162
163	obj->dev = dev;
164	obj->filp = NULL;
165
166	kref_init(&obj->refcount);
167	atomic_set(&obj->handle_count, 0);
168	obj->size = size;
169
170	return 0;
171}
172EXPORT_SYMBOL(drm_gem_private_object_init);
173
174/**
175 * Allocate a GEM object of the specified size with shmfs backing store
176 */
177struct drm_gem_object *
178drm_gem_object_alloc(struct drm_device *dev, size_t size)
179{
180	struct drm_gem_object *obj;
181
182	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
183	if (!obj)
184		goto free;
185
186	if (drm_gem_object_init(dev, obj, size) != 0)
187		goto free;
188
189	if (dev->driver->gem_init_object != NULL &&
190	    dev->driver->gem_init_object(obj) != 0) {
191		goto fput;
192	}
193	return obj;
194fput:
195	/* Object_init mangles the global counters - readjust them. */
196	fput(obj->filp);
197free:
198	kfree(obj);
199	return NULL;
200}
201EXPORT_SYMBOL(drm_gem_object_alloc);
202
203/**
204 * Removes the mapping from handle to filp for this object.
205 */
206int
207drm_gem_handle_delete(struct drm_file *filp, u32 handle)
208{
209	struct drm_device *dev;
210	struct drm_gem_object *obj;
211
212	/* This is gross. The idr system doesn't let us try a delete and
213	 * return an error code.  It just spews if you fail at deleting.
214	 * So, we have to grab a lock around finding the object and then
215	 * doing the delete on it and dropping the refcount, or the user
216	 * could race us to double-decrement the refcount and cause a
217	 * use-after-free later.  Given the frequency of our handle lookups,
218	 * we may want to use ida for number allocation and a hash table
219	 * for the pointers, anyway.
220	 */
221	spin_lock(&filp->table_lock);
222
223	/* Check if we currently have a reference on the object */
224	obj = idr_find(&filp->object_idr, handle);
225	if (obj == NULL) {
226		spin_unlock(&filp->table_lock);
227		return -EINVAL;
228	}
229	dev = obj->dev;
230
231	/* Release reference and decrement refcount. */
232	idr_remove(&filp->object_idr, handle);
233	spin_unlock(&filp->table_lock);
234
235	if (dev->driver->gem_close_object)
236		dev->driver->gem_close_object(obj, filp);
237	drm_gem_object_handle_unreference_unlocked(obj);
238
239	return 0;
240}
241EXPORT_SYMBOL(drm_gem_handle_delete);
242
243/**
244 * Create a handle for this object. This adds a handle reference
245 * to the object, which includes a regular reference count. Callers
246 * will likely want to dereference the object afterwards.
247 */
248int
249drm_gem_handle_create(struct drm_file *file_priv,
250		       struct drm_gem_object *obj,
251		       u32 *handlep)
252{
253	struct drm_device *dev = obj->dev;
254	int ret;
255
256	/*
257	 * Get the user-visible handle using idr.
258	 */
259again:
260	/* ensure there is space available to allocate a handle */
261	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
262		return -ENOMEM;
263
264	/* do the allocation under our spinlock */
265	spin_lock(&file_priv->table_lock);
266	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
267	spin_unlock(&file_priv->table_lock);
268	if (ret == -EAGAIN)
269		goto again;
270
271	if (ret != 0)
272		return ret;
273
274	drm_gem_object_handle_reference(obj);
275
276	if (dev->driver->gem_open_object) {
277		ret = dev->driver->gem_open_object(obj, file_priv);
278		if (ret) {
279			drm_gem_handle_delete(file_priv, *handlep);
280			return ret;
281		}
282	}
283
284	return 0;
285}
286EXPORT_SYMBOL(drm_gem_handle_create);
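
A sketch of the create-ioctl pattern these helpers enable (my_create_ioctl and struct my_create_args are hypothetical names): once the handle exists it owns a reference, so the allocation-time reference is dropped unconditionally.

static int my_create_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct my_create_args *args = data;	/* hypothetical ioctl payload */
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* Drop the allocation reference; on success the handle holds one. */
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}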
287
288/** Returns a reference to the object named by the handle. */
289struct drm_gem_object *
290drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
291		      u32 handle)
292{
293	struct drm_gem_object *obj;
294
295	spin_lock(&filp->table_lock);
296
297	/* Check if we currently have a reference on the object */
298	obj = idr_find(&filp->object_idr, handle);
299	if (obj == NULL) {
300		spin_unlock(&filp->table_lock);
301		return NULL;
302	}
303
304	drm_gem_object_reference(obj);
305
306	spin_unlock(&filp->table_lock);
307
308	return obj;
309}
310EXPORT_SYMBOL(drm_gem_object_lookup);
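
The consumer side, sketched with hypothetical names: every successful lookup takes a reference that must be dropped once the ioctl is done with the object.

static int my_query_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct my_query_args *args = data;	/* hypothetical ioctl payload */
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	args->size = obj->size;	/* example: report the object size */

	drm_gem_object_unreference_unlocked(obj);
	return 0;
}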
311
312/**
313 * Releases the handle to an mm object.
314 */
315int
316drm_gem_close_ioctl(struct drm_device *dev, void *data,
317		    struct drm_file *file_priv)
318{
319	struct drm_gem_close *args = data;
320	int ret;
321
322	if (!(dev->driver->driver_features & DRIVER_GEM))
323		return -ENODEV;
324
325	ret = drm_gem_handle_delete(file_priv, args->handle);
326
327	return ret;
328}
329
330/**
331 * Create a global name for an object, returning the name.
332 *
333 * Note that the name does not hold a reference; when the object
334 * is freed, the name goes away.
335 */
336int
337drm_gem_flink_ioctl(struct drm_device *dev, void *data,
338		    struct drm_file *file_priv)
339{
340	struct drm_gem_flink *args = data;
341	struct drm_gem_object *obj;
342	int ret;
343
344	if (!(dev->driver->driver_features & DRIVER_GEM))
345		return -ENODEV;
346
347	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
348	if (obj == NULL)
349		return -ENOENT;
350
351again:
352	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
353		ret = -ENOMEM;
354		goto err;
355	}
356
357	spin_lock(&dev->object_name_lock);
358	if (!obj->name) {
359		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
360					&obj->name);
361		args->name = (uint64_t) obj->name;
362		spin_unlock(&dev->object_name_lock);
363
364		if (ret == -EAGAIN)
365			goto again;
366
367		if (ret != 0)
368			goto err;
369
370		/* Allocate a reference for the name table.  */
371		drm_gem_object_reference(obj);
372	} else {
373		args->name = (uint64_t) obj->name;
374		spin_unlock(&dev->object_name_lock);
375		ret = 0;
376	}
377
378err:
379	drm_gem_object_unreference_unlocked(obj);
380	return ret;
381}
382
383/**
384 * Open an object using the global name, returning a handle and the size.
385 *
386 * This handle (of course) holds a reference to the object, so the object
387 * will not go away until the handle is deleted.
388 */
389int
390drm_gem_open_ioctl(struct drm_device *dev, void *data,
391		   struct drm_file *file_priv)
392{
393	struct drm_gem_open *args = data;
394	struct drm_gem_object *obj;
395	int ret;
396	u32 handle;
397
398	if (!(dev->driver->driver_features & DRIVER_GEM))
399		return -ENODEV;
400
401	spin_lock(&dev->object_name_lock);
402	obj = idr_find(&dev->object_name_idr, (int) args->name);
403	if (obj)
404		drm_gem_object_reference(obj);
405	spin_unlock(&dev->object_name_lock);
406	if (!obj)
407		return -ENOENT;
408
409	ret = drm_gem_handle_create(file_priv, obj, &handle);
410	drm_gem_object_unreference_unlocked(obj);
411	if (ret)
412		return ret;
413
414	args->handle = handle;
415	args->size = obj->size;
416
417	return 0;
418}
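
Seen from userspace, the two ioctls above form the classic buffer-sharing flow. A sketch using only the uapi structures, with error checking omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* uapi structs; include path may vary */

/* Process A: publish a global name for one of its handles. */
static uint32_t publish_name(int fd_a, uint32_t handle_a)
{
	struct drm_gem_flink flink = { .handle = handle_a };

	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
	return flink.name;
}

/* Process B: open the same object by name, getting its own handle. */
static uint32_t open_by_name(int fd_b, uint32_t name)
{
	struct drm_gem_open op = { .name = name };

	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
	return op.handle;	/* op.size also reports the object size */
}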
419
420/**
421 * Called at device open time, sets up the structure for handling refcounting
422 * of mm objects.
423 */
424void
425drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
426{
427	idr_init(&file_private->object_idr);
428	spin_lock_init(&file_private->table_lock);
429}
430
431/**
432 * Called at device close to release the file's
433 * handle references on objects.
434 */
435static int
436drm_gem_object_release_handle(int id, void *ptr, void *data)
437{
438	struct drm_file *file_priv = data;
439	struct drm_gem_object *obj = ptr;
440	struct drm_device *dev = obj->dev;
441
442	if (dev->driver->gem_close_object)
443		dev->driver->gem_close_object(obj, file_priv);
444
445	drm_gem_object_handle_unreference_unlocked(obj);
446
447	return 0;
448}
449
450/**
451 * Called at close time when the filp is going away.
452 *
453 * Releases any remaining references on objects by this filp.
454 */
455void
456drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
457{
458	idr_for_each(&file_private->object_idr,
459		     &drm_gem_object_release_handle, file_private);
460
461	idr_remove_all(&file_private->object_idr);
462	idr_destroy(&file_private->object_idr);
463}
464
465void
466drm_gem_object_release(struct drm_gem_object *obj)
467{
468	if (obj->filp)
469	    fput(obj->filp);
470}
471EXPORT_SYMBOL(drm_gem_object_release);
472
473/**
474 * Called after the last reference to the object has been lost.
475 * Must be called holding struct_mutex
476 *
477 * Frees the object
478 */
479void
480drm_gem_object_free(struct kref *kref)
481{
482	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
483	struct drm_device *dev = obj->dev;
484
485	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
486
487	if (dev->driver->gem_free_object != NULL)
488		dev->driver->gem_free_object(obj);
489}
490EXPORT_SYMBOL(drm_gem_object_free);
491
492static void drm_gem_object_ref_bug(struct kref *list_kref)
493{
494	BUG();
495}
496
497/**
498 * Called after the last handle to the object has been closed
499 *
500 * Removes any name for the object. Note that this must be
501 * called before drm_gem_object_free or we'll be touching
502 * freed memory
503 */
504void drm_gem_object_handle_free(struct drm_gem_object *obj)
505{
506	struct drm_device *dev = obj->dev;
507
508	/* Remove any name for this object */
509	spin_lock(&dev->object_name_lock);
510	if (obj->name) {
511		idr_remove(&dev->object_name_idr, obj->name);
512		obj->name = 0;
513		spin_unlock(&dev->object_name_lock);
514		/*
515		 * The object name held a reference to this object, drop
516		 * that now.
517		 *
518		 * This cannot be the last reference, since the handle holds one too.
519		 */
520		kref_put(&obj->refcount, drm_gem_object_ref_bug);
521	} else
522		spin_unlock(&dev->object_name_lock);
523
524}
525EXPORT_SYMBOL(drm_gem_object_handle_free);
526
527void drm_gem_vm_open(struct vm_area_struct *vma)
528{
529	struct drm_gem_object *obj = vma->vm_private_data;
530
531	drm_gem_object_reference(obj);
532
533	mutex_lock(&obj->dev->struct_mutex);
534	drm_vm_open_locked(vma);
535	mutex_unlock(&obj->dev->struct_mutex);
536}
537EXPORT_SYMBOL(drm_gem_vm_open);
538
539void drm_gem_vm_close(struct vm_area_struct *vma)
540{
541	struct drm_gem_object *obj = vma->vm_private_data;
542	struct drm_device *dev = obj->dev;
543
544	mutex_lock(&dev->struct_mutex);
545	drm_vm_close_locked(vma);
546	drm_gem_object_unreference(obj);
547	mutex_unlock(&dev->struct_mutex);
548}
549EXPORT_SYMBOL(drm_gem_vm_close);
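
These two helpers are intended to be wired into the driver's gem_vm_ops next to a driver-specific fault handler, roughly like this (my_gem_fault is a hypothetical name):

static const struct vm_operations_struct my_gem_vm_ops = {
	.fault = my_gem_fault,		/* driver-specific fault handler */
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};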
550
551
552/**
553 * drm_gem_mmap - memory map routine for GEM objects
554 * @filp: DRM file pointer
555 * @vma: VMA for the area to be mapped
556 *
557 * If a driver supports GEM object mapping, mmap calls on the DRM file
558 * descriptor will end up here.
559 *
560 * If we find the object based on the offset passed in (vma->vm_pgoff will
561 * contain the fake offset we created when the GTT map ioctl was called on
562 * the object), we set up the driver fault handler so that any accesses
563 * to the object can be trapped, to perform migration, GTT binding, surface
564 * register allocation, or performance monitoring.
565 */
566int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
567{
568	struct drm_file *priv = filp->private_data;
569	struct drm_device *dev = priv->minor->dev;
570	struct drm_gem_mm *mm = dev->mm_private;
571	struct drm_local_map *map = NULL;
572	struct drm_gem_object *obj;
573	struct drm_hash_item *hash;
574	int ret = 0;
575
576	mutex_lock(&dev->struct_mutex);
577
578	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
579		mutex_unlock(&dev->struct_mutex);
580		return drm_mmap(filp, vma);
581	}
582
583	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
584	if (!map ||
585	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
586		ret =  -EPERM;
587		goto out_unlock;
588	}
589
590	/* Check for valid size. */
591	if (map->size < vma->vm_end - vma->vm_start) {
592		ret = -EINVAL;
593		goto out_unlock;
594	}
595
596	obj = map->handle;
597	if (!obj->dev->driver->gem_vm_ops) {
598		ret = -EINVAL;
599		goto out_unlock;
600	}
601
602	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
603	vma->vm_ops = obj->dev->driver->gem_vm_ops;
604	vma->vm_private_data = map->handle;
605	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
606
607	/* Take a ref for this mapping of the object, so that the fault
608	 * handler can dereference the mmap offset's pointer to the object.
609	 * This reference is cleaned up by the corresponding vm_close
610	 * (which should happen whether the vma was created by this call, or
611	 * by a vm_open due to mremap or partial unmap or whatever).
612	 */
613	drm_gem_object_reference(obj);
614
615	vma->vm_file = filp;	/* Needed for drm_vm_open() */
616	drm_vm_open_locked(vma);
617
618out_unlock:
619	mutex_unlock(&dev->struct_mutex);
620
621	return ret;
622}
623EXPORT_SYMBOL(drm_gem_mmap);
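
The userspace half of the fake-offset scheme looks roughly as follows (DRM_IOCTL_MY_MAP and struct my_map_args stand in for a driver-specific map ioctl such as a GTT-map ioctl; error checking omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

struct my_map_args {	/* hypothetical ioctl payload */
	uint32_t handle;
	uint64_t offset;	/* returned fake offset, in bytes */
};

static void *map_bo(int drm_fd, uint32_t handle, size_t size)
{
	struct my_map_args args = { .handle = handle };

	/* The driver ioctl fills in the fake mmap offset for the object. */
	ioctl(drm_fd, DRM_IOCTL_MY_MAP, &args);

	/* mmap() on the DRM fd with that offset lands in drm_gem_mmap(). */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, args.offset);
}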
v4.17
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *
  26 */
  27
  28#include <linux/types.h>
  29#include <linux/slab.h>
  30#include <linux/mm.h>
  31#include <linux/uaccess.h>
  32#include <linux/fs.h>
  33#include <linux/file.h>
  34#include <linux/module.h>
  35#include <linux/mman.h>
  36#include <linux/pagemap.h>
  37#include <linux/shmem_fs.h>
  38#include <linux/dma-buf.h>
  39#include <linux/mem_encrypt.h>
  40#include <drm/drmP.h>
  41#include <drm/drm_vma_manager.h>
  42#include <drm/drm_gem.h>
  43#include <drm/drm_print.h>
  44#include "drm_internal.h"
  45
  46/** @file drm_gem.c
  47 *
  48 * This file provides some of the base ioctls and library routines for
  49 * the graphics memory manager implemented by each device driver.
  50 *
  51 * Because various devices have different requirements in terms of
  52 * synchronization and migration strategies, implementing that is left up to
  53 * the driver, and all that the general API provides should be generic --
  54 * allocating objects, reading/writing data with the cpu, freeing objects.
  55 * Even there, platform-dependent optimizations for reading/writing data with
  56 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
  57 * the DRI2 implementation wants to have at least allocate/mmap be generic.
  58 *
  59 * The goal was to have swap-backed object allocation managed through
  60 * struct file.  However, file descriptors as handles to a struct file have
  61 * two major failings:
  62 * - Process limits prevent more than 1024 or so being used at a time by
  63 *   default.
  64 * - Inability to allocate high fds will aggravate the X Server's select()
  65 *   handling, and likely that of many GL client applications as well.
  66 *
  67 * This led to a plan of using our own integer IDs (called handles, following
  68 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
  69 * ioctls.  The objects themselves will still include the struct file so
  70 * that we can transition to fds if the required kernel infrastructure shows
  71 * up at a later date, and as our interface with shmfs for memory allocation.
  72 */
  73
  74/*
  75 * We make up offsets for buffer objects so we can recognize them at
  76 * mmap time.
  77 */
  78
  79/* pgoff in mmap is an unsigned long, so we need to make sure that
  80 * the faked up offset will fit
  81 */
  82
  83#if BITS_PER_LONG == 64
  84#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
  85#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
  86#else
  87#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
  88#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
  89#endif
  90
  91/**
  92 * drm_gem_init - Initialize the GEM device fields
  93 * @dev: drm_device structure to initialize
  94 */
  95int
  96drm_gem_init(struct drm_device *dev)
  97{
  98	struct drm_vma_offset_manager *vma_offset_manager;
  99
 100	mutex_init(&dev->object_name_lock);
 101	idr_init_base(&dev->object_name_idr, 1);
 102
 103	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
 104	if (!vma_offset_manager) {
 105		DRM_ERROR("out of memory\n");
 106		return -ENOMEM;
 107	}
 108
 109	dev->vma_offset_manager = vma_offset_manager;
 110	drm_vma_offset_manager_init(vma_offset_manager,
 111				    DRM_FILE_PAGE_OFFSET_START,
 112				    DRM_FILE_PAGE_OFFSET_SIZE);
 113
 114	return 0;
 115}
 116
 117void
 118drm_gem_destroy(struct drm_device *dev)
 119{
 120
 121	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
 122	kfree(dev->vma_offset_manager);
 123	dev->vma_offset_manager = NULL;
 124}
 125
 126/**
 127 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 128 * @dev: drm_device the object should be initialized for
 129 * @obj: drm_gem_object to initialize
 130 * @size: object size
 131 *
 132 * Initialize an already allocated GEM object of the specified size with
 133 * shmfs backing store.
 134 */
 135int drm_gem_object_init(struct drm_device *dev,
 136			struct drm_gem_object *obj, size_t size)
 137{
 138	struct file *filp;
 139
 140	drm_gem_private_object_init(dev, obj, size);
 141
 142	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
 143	if (IS_ERR(filp))
 144		return PTR_ERR(filp);
 145
 146	obj->filp = filp;
 147
 148	return 0;
 149}
 150EXPORT_SYMBOL(drm_gem_object_init);
 151
 152/**
 153 * drm_gem_private_object_init - initialize an allocated private GEM object
 154 * @dev: drm_device the object should be initialized for
 155 * @obj: drm_gem_object to initialize
 156 * @size: object size
 157 *
 158 * Initialize an already allocated GEM object of the specified size with
 159 * no GEM provided backing store. Instead the caller is responsible for
 160 * backing the object and handling it.
 161 */
 162void drm_gem_private_object_init(struct drm_device *dev,
 163				 struct drm_gem_object *obj, size_t size)
 164{
 165	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 166
 167	obj->dev = dev;
 168	obj->filp = NULL;
 169
 170	kref_init(&obj->refcount);
 171	obj->handle_count = 0;
 172	obj->size = size;
 173	drm_vma_node_reset(&obj->vma_node);
 174}
 175EXPORT_SYMBOL(drm_gem_private_object_init);
 176
 177static void
 178drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 179{
 180	/*
 181	 * Note: obj->dma_buf can't disappear as long as we still hold a
 182	 * handle reference in obj->handle_count.
 183	 */
 184	mutex_lock(&filp->prime.lock);
 185	if (obj->dma_buf) {
 186		drm_prime_remove_buf_handle_locked(&filp->prime,
 187						   obj->dma_buf);
 188	}
 189	mutex_unlock(&filp->prime.lock);
 190}
 191
 192/**
 193 * drm_gem_object_handle_free - release resources bound to userspace handles
 194 * @obj: GEM object to clean up.
 195 *
 196 * Called after the last handle to the object has been closed
 197 *
 198 * Removes any name for the object. Note that this must be
 199 * called before drm_gem_object_free or we'll be touching
 200 * freed memory
 201 */
 202static void drm_gem_object_handle_free(struct drm_gem_object *obj)
 203{
 204	struct drm_device *dev = obj->dev;
 205
 206	/* Remove any name for this object */
 207	if (obj->name) {
 208		idr_remove(&dev->object_name_idr, obj->name);
 209		obj->name = 0;
 210	}
 211}
 212
 213static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 214{
 215	/* Unbreak the reference cycle if we have an exported dma_buf. */
 216	if (obj->dma_buf) {
 217		dma_buf_put(obj->dma_buf);
 218		obj->dma_buf = NULL;
 219	}
 220}
 221
 222static void
 223drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 224{
 225	struct drm_device *dev = obj->dev;
 226	bool final = false;
 227
 228	if (WARN_ON(obj->handle_count == 0))
 229		return;
 230
 231	/*
 232	 * Must bump handle count first as this may be the last
 233	 * ref, in which case the object would disappear before we
 234	 * checked for a name
 235	 */
 236
 237	mutex_lock(&dev->object_name_lock);
 238	if (--obj->handle_count == 0) {
 239		drm_gem_object_handle_free(obj);
 240		drm_gem_object_exported_dma_buf_free(obj);
 241		final = true;
 242	}
 243	mutex_unlock(&dev->object_name_lock);
 244
 245	if (final)
 246		drm_gem_object_put_unlocked(obj);
 247}
 248
 249/*
 250 * Called at device or object close to release the file's
 251 * handle references on objects.
 252 */
 253static int
 254drm_gem_object_release_handle(int id, void *ptr, void *data)
 255{
 256	struct drm_file *file_priv = data;
 257	struct drm_gem_object *obj = ptr;
 258	struct drm_device *dev = obj->dev;
 259
 260	if (dev->driver->gem_close_object)
 261		dev->driver->gem_close_object(obj, file_priv);
 262
 263	if (drm_core_check_feature(dev, DRIVER_PRIME))
 264		drm_gem_remove_prime_handles(obj, file_priv);
 265	drm_vma_node_revoke(&obj->vma_node, file_priv);
 266
 267	drm_gem_object_handle_put_unlocked(obj);
 268
 269	return 0;
 270}
 271
 272/**
 273 * drm_gem_handle_delete - deletes the given file-private handle
 274 * @filp: drm file-private structure to use for the handle look up
 275 * @handle: userspace handle to delete
 276 *
 277 * Removes the GEM handle from the @filp lookup table which has been added with
 278 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 279 * resources like GEM names.
 280 */
 281int
 282drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 283{
 284	struct drm_gem_object *obj;
 285
 286	spin_lock(&filp->table_lock);
 287
 288	/* Check if we currently have a reference on the object */
 289	obj = idr_replace(&filp->object_idr, NULL, handle);
 290	spin_unlock(&filp->table_lock);
 291	if (IS_ERR_OR_NULL(obj))
 292		return -EINVAL;
 293
 294	/* Release driver's reference and decrement refcount. */
 295	drm_gem_object_release_handle(handle, obj, filp);
 296
 297	/* And finally make the handle available for future allocations. */
 298	spin_lock(&filp->table_lock);
 299	idr_remove(&filp->object_idr, handle);
 300	spin_unlock(&filp->table_lock);
 301
 302	return 0;
 303}
 304EXPORT_SYMBOL(drm_gem_handle_delete);
 305
 306/**
 307 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 308 * @file: drm file-private structure containing the gem object
 309 * @dev: corresponding drm_device
 310 * @handle: gem object handle
 311 * @offset: return location for the fake mmap offset
 312 *
 313 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 314 * drivers which use gem to manage their backing storage.
 315 *
 316 * Returns:
 317 * 0 on success or a negative error code on failure.
 318 */
 319int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 320			    u32 handle, u64 *offset)
 321{
 322	struct drm_gem_object *obj;
 323	int ret;
 324
 325	obj = drm_gem_object_lookup(file, handle);
 326	if (!obj)
 327		return -ENOENT;
 328
 329	/* Don't allow imported objects to be mapped */
 330	if (obj->import_attach) {
 331		ret = -EINVAL;
 332		goto out;
 333	}
 334
 335	ret = drm_gem_create_mmap_offset(obj);
 336	if (ret)
 337		goto out;
 338
 339	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 340out:
 341	drm_gem_object_put_unlocked(obj);
 342
 343	return ret;
 344}
 345EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
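
From userspace, the dumb-buffer path that reaches this helper is fully generic. Roughly (error checking omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>	/* struct drm_mode_map_dumb; path may vary */

static void *map_dumb(int drm_fd, uint32_t dumb_handle, size_t dumb_size)
{
	struct drm_mode_map_dumb map = { .handle = dumb_handle };

	/* Ends up in the driver's .dumb_map_offset, i.e. this helper. */
	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	return mmap(NULL, dumb_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}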
 346
 347/**
 348 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 349 * @file: drm file-private structure to remove the dumb handle from
 350 * @dev: corresponding drm_device
 351 * @handle: the dumb handle to remove
 352 *
 353 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 354 * which use gem to manage their backing storage.
 355 */
 356int drm_gem_dumb_destroy(struct drm_file *file,
 357			 struct drm_device *dev,
 358			 uint32_t handle)
 359{
 360	return drm_gem_handle_delete(file, handle);
 361}
 362EXPORT_SYMBOL(drm_gem_dumb_destroy);
 363
 364/**
 365 * drm_gem_handle_create_tail - internal functions to create a handle
 366 * @file_priv: drm file-private structure to register the handle for
 367 * @obj: object to register
 368 * @handlep: pointer to return the created handle to the caller
 369 *
 370 * This expects the &drm_device.object_name_lock to be held already and will
 371 * drop it before returning. Used to avoid races in establishing new handles
 372 * when importing an object from either an flink name or a dma-buf.
 373 *
 374 * Handles must be released again through drm_gem_handle_delete(). This is done
 375 * when userspace closes @file_priv for all attached handles, or through the
 376 * GEM_CLOSE ioctl for individual handles.
 377 */
 378int
 379drm_gem_handle_create_tail(struct drm_file *file_priv,
 380			   struct drm_gem_object *obj,
 381			   u32 *handlep)
 382{
 383	struct drm_device *dev = obj->dev;
 384	u32 handle;
 385	int ret;
 386
 387	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
 388	if (obj->handle_count++ == 0)
 389		drm_gem_object_get(obj);
 390
 391	/*
 392	 * Get the user-visible handle using idr.  Preload and perform
 393	 * allocation under our spinlock.
 394	 */
 395	idr_preload(GFP_KERNEL);
 396	spin_lock(&file_priv->table_lock);
 397
 398	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
 399
 400	spin_unlock(&file_priv->table_lock);
 401	idr_preload_end();
 402
 403	mutex_unlock(&dev->object_name_lock);
 404	if (ret < 0)
 405		goto err_unref;
 406
 407	handle = ret;
 408
 409	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 410	if (ret)
 411		goto err_remove;
 412
 413	if (dev->driver->gem_open_object) {
 414		ret = dev->driver->gem_open_object(obj, file_priv);
 415		if (ret)
 416			goto err_revoke;
 417	}
 418
 419	*handlep = handle;
 420	return 0;
 421
 422err_revoke:
 423	drm_vma_node_revoke(&obj->vma_node, file_priv);
 424err_remove:
 425	spin_lock(&file_priv->table_lock);
 426	idr_remove(&file_priv->object_idr, handle);
 427	spin_unlock(&file_priv->table_lock);
 428err_unref:
 429	drm_gem_object_handle_put_unlocked(obj);
 430	return ret;
 431}
 432
 433/**
 434 * drm_gem_handle_create - create a gem handle for an object
 435 * @file_priv: drm file-private structure to register the handle for
 436 * @obj: object to register
 437 * @handlep: pointer to return the created handle to the caller
 438 *
 439 * Create a handle for this object. This adds a handle reference
 440 * to the object, which includes a regular reference count. Callers
 441 * will likely want to dereference the object afterwards.
 442 */
 443int drm_gem_handle_create(struct drm_file *file_priv,
 444			  struct drm_gem_object *obj,
 445			  u32 *handlep)
 446{
 447	mutex_lock(&obj->dev->object_name_lock);
 448
 449	return drm_gem_handle_create_tail(file_priv, obj, handlep);
 450}
 451EXPORT_SYMBOL(drm_gem_handle_create);
 452
 453
 454/**
 455 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 456 * @obj: obj in question
 457 *
 458 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 459 *
 460 * Note that drm_gem_object_release() already calls this function, so drivers
 461 * don't have to take care of releasing the mmap offset themselves when freeing
 462 * the GEM object.
 463 */
 464void
 465drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 466{
 467	struct drm_device *dev = obj->dev;
 468
 469	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
 470}
 471EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 472
 473/**
 474 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 475 * @obj: obj in question
 476 * @size: the virtual size
 477 *
 478 * GEM memory mapping works by handing back to userspace a fake mmap offset
 479 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 480 * up the object based on the offset and sets up the various memory mapping
 481 * structures.
 482 *
 483 * This routine allocates and attaches a fake offset for @obj, in cases where
 484 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 485 * Otherwise just use drm_gem_create_mmap_offset().
 486 *
 487 * This function is idempotent and handles an already allocated mmap offset
 488 * transparently. Drivers do not need to check for this case.
 489 */
 490int
 491drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 492{
 493	struct drm_device *dev = obj->dev;
 494
 495	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
 496				  size / PAGE_SIZE);
 497}
 498EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
 499
 500/**
 501 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 502 * @obj: obj in question
 503 *
 504 * GEM memory mapping works by handing back to userspace a fake mmap offset
 505 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 506 * up the object based on the offset and sets up the various memory mapping
 507 * structures.
 508 *
 509 * This routine allocates and attaches a fake offset for @obj.
 510 *
 511 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 512 * the fake offset again.
 513 */
 514int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 515{
 516	return drm_gem_create_mmap_offset_size(obj, obj->size);
 517}
 518EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 519
 520/**
 521 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 522 * from shmem
 523 * @obj: obj in question
 524 *
 525 * This reads the page-array of the shmem-backing storage of the given gem
 526 * object. An array of pages is returned. If a page is not allocated or
 527 * swapped-out, this will allocate/swap-in the required pages. Note that the
 528 * whole object is covered by the page-array and pinned in memory.
 529 *
 530 * Use drm_gem_put_pages() to release the array and unpin all pages.
 531 *
 532 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 533 * If you require other GFP-masks, you have to do those allocations yourself.
 534 *
 535 * Note that you are not allowed to change gfp-zones during runtime. That is,
 536 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 537 * set during initialization. If you have special zone constraints, set them
 538 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 539 * to keep pages in the required zone during swap-in.
 540 */
 541struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 542{
 543	struct address_space *mapping;
 544	struct page *p, **pages;
 545	int i, npages;
 546
 547	/* This is the shared memory object that backs the GEM resource */
 548	mapping = obj->filp->f_mapping;
 549
 550	/* We already BUG_ON() for non-page-aligned sizes in
 551	 * drm_gem_object_init(), so we should never hit this unless
 552	 * driver author is doing something really wrong:
 553	 */
 554	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
 555
 556	npages = obj->size >> PAGE_SHIFT;
 557
 558	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 559	if (pages == NULL)
 560		return ERR_PTR(-ENOMEM);
 561
 562	for (i = 0; i < npages; i++) {
 563		p = shmem_read_mapping_page(mapping, i);
 564		if (IS_ERR(p))
 565			goto fail;
 566		pages[i] = p;
 567
 568		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
 569		 * correct region during swapin. Note that this requires
 570		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 571		 * so shmem can relocate pages during swapin if required.
 572		 */
 573		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
 574				(page_to_pfn(p) >= 0x00100000UL));
 575	}
 576
 577	return pages;
 578
 579fail:
 580	while (i--)
 581		put_page(pages[i]);
 582
 583	kvfree(pages);
 584	return ERR_CAST(p);
 585}
 586EXPORT_SYMBOL(drm_gem_get_pages);
 587
 588/**
 589 * drm_gem_put_pages - helper to free backing pages for a GEM object
 590 * @obj: obj in question
 591 * @pages: pages to free
 592 * @dirty: if true, pages will be marked as dirty
 593 * @accessed: if true, the pages will be marked as accessed
 594 */
 595void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 596		bool dirty, bool accessed)
 597{
 598	int i, npages;
 599
 600	/* We already BUG_ON() for non-page-aligned sizes in
 601	 * drm_gem_object_init(), so we should never hit this unless
 602	 * driver author is doing something really wrong:
 603	 */
 604	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
 605
 606	npages = obj->size >> PAGE_SHIFT;
 607
 608	for (i = 0; i < npages; i++) {
 609		if (dirty)
 610			set_page_dirty(pages[i]);
 611
 612		if (accessed)
 613			mark_page_accessed(pages[i]);
 614
 615		/* Undo the reference we took when populating the table */
 616		put_page(pages[i]);
 617	}
 618
 619	kvfree(pages);
 620}
 621EXPORT_SYMBOL(drm_gem_put_pages);
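
A sketch of how the two page helpers pair up in a driver's pin/unpin path (my_pin_pages and my_unpin_pages are hypothetical names; the returned array would typically be turned into an sg_table for DMA):

static struct page **my_pin_pages(struct drm_gem_object *obj)
{
	/* Allocates/swaps in and pins the whole backing store. */
	return drm_gem_get_pages(obj);
}

static void my_unpin_pages(struct drm_gem_object *obj, struct page **pages)
{
	/* Mark dirty so device writes survive swap-out, then unpin. */
	drm_gem_put_pages(obj, pages, true, false);
}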
 622
 623/**
 624 * drm_gem_object_lookup - look up a GEM object from its handle
 625 * @filp: DRM file private data
 626 * @handle: userspace handle
 627 *
 628 * Returns:
 629 *
 630 * A reference to the object named by the handle if such exists on @filp, NULL
 631 * otherwise.
 632 */
 633struct drm_gem_object *
 634drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 635{
 636	struct drm_gem_object *obj;
 637
 638	spin_lock(&filp->table_lock);
 639
 640	/* Check if we currently have a reference on the object */
 641	obj = idr_find(&filp->object_idr, handle);
 642	if (obj)
 643		drm_gem_object_get(obj);
 644
 645	spin_unlock(&filp->table_lock);
 646
 647	return obj;
 648}
 649EXPORT_SYMBOL(drm_gem_object_lookup);
 650
 651/**
 652 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 653 * @dev: drm_device
 654 * @data: ioctl data
 655 * @file_priv: drm file-private structure
 656 *
 657 * Releases the handle to an mm object.
 658 */
 659int
 660drm_gem_close_ioctl(struct drm_device *dev, void *data,
 661		    struct drm_file *file_priv)
 662{
 663	struct drm_gem_close *args = data;
 664	int ret;
 665
 666	if (!drm_core_check_feature(dev, DRIVER_GEM))
 667		return -ENODEV;
 668
 669	ret = drm_gem_handle_delete(file_priv, args->handle);
 670
 671	return ret;
 672}
 673
 674/**
 675 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 676 * @dev: drm_device
 677 * @data: ioctl data
 678 * @file_priv: drm file-private structure
 679 *
 680 * Create a global name for an object, returning the name.
 681 *
 682 * Note that the name does not hold a reference; when the object
 683 * is freed, the name goes away.
 684 */
 685int
 686drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 687		    struct drm_file *file_priv)
 688{
 689	struct drm_gem_flink *args = data;
 690	struct drm_gem_object *obj;
 691	int ret;
 692
 693	if (!drm_core_check_feature(dev, DRIVER_GEM))
 694		return -ENODEV;
 695
 696	obj = drm_gem_object_lookup(file_priv, args->handle);
 697	if (obj == NULL)
 698		return -ENOENT;
 699
 700	mutex_lock(&dev->object_name_lock);
 701	/* prevent races with concurrent gem_close. */
 702	if (obj->handle_count == 0) {
 703		ret = -ENOENT;
 704		goto err;
 705	}
 706
 707	if (!obj->name) {
 708		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
 709		if (ret < 0)
 710			goto err;
 711
 712		obj->name = ret;
 713	}
 714
 715	args->name = (uint64_t) obj->name;
 716	ret = 0;
 717
 718err:
 719	mutex_unlock(&dev->object_name_lock);
 720	drm_gem_object_put_unlocked(obj);
 721	return ret;
 722}
 723
 724/**
 725 * drm_gem_open - implementation of the GEM_OPEN ioctl
 726 * @dev: drm_device
 727 * @data: ioctl data
 728 * @file_priv: drm file-private structure
 729 *
 730 * Open an object using the global name, returning a handle and the size.
 731 *
 732 * This handle (of course) holds a reference to the object, so the object
 733 * will not go away until the handle is deleted.
 734 */
 735int
 736drm_gem_open_ioctl(struct drm_device *dev, void *data,
 737		   struct drm_file *file_priv)
 738{
 739	struct drm_gem_open *args = data;
 740	struct drm_gem_object *obj;
 741	int ret;
 742	u32 handle;
 743
 744	if (!drm_core_check_feature(dev, DRIVER_GEM))
 745		return -ENODEV;
 746
 747	mutex_lock(&dev->object_name_lock);
 748	obj = idr_find(&dev->object_name_idr, (int) args->name);
 749	if (obj) {
 750		drm_gem_object_get(obj);
 751	} else {
 752		mutex_unlock(&dev->object_name_lock);
 753		return -ENOENT;
 754	}
 755
 756	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
 757	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
 758	drm_gem_object_put_unlocked(obj);
 759	if (ret)
 760		return ret;
 761
 762	args->handle = handle;
 763	args->size = obj->size;
 764
 765	return 0;
 766}
 767
 768/**
 769 * drm_gem_open - initializes GEM file-private structures at devnode open time
 770 * @dev: drm_device which is being opened by userspace
 771 * @file_private: drm file-private structure to set up
 772 *
 773 * Called at device open time, sets up the structure for handling refcounting
 774 * of mm objects.
 775 */
 776void
 777drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 778{
 779	idr_init_base(&file_private->object_idr, 1);
 780	spin_lock_init(&file_private->table_lock);
 781}
 782
 783/**
 784 * drm_gem_release - release file-private GEM resources
 785 * @dev: drm_device which is being closed by userspace
 786 * @file_private: drm file-private structure to clean up
 787 *
 788 * Called at close time when the filp is going away.
 789 *
 790 * Releases any remaining references on objects by this filp.
 791 */
 792void
 793drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 794{
 795	idr_for_each(&file_private->object_idr,
 796		     &drm_gem_object_release_handle, file_private);
 797	idr_destroy(&file_private->object_idr);
 798}
 799
 800/**
 801 * drm_gem_object_release - release GEM buffer object resources
 802 * @obj: GEM buffer object
 803 *
 804 * This releases any structures and resources used by @obj and is the inverse of
 805 * drm_gem_object_init().
 806 */
 807void
 808drm_gem_object_release(struct drm_gem_object *obj)
 809{
 810	WARN_ON(obj->dma_buf);
 811
 812	if (obj->filp)
 813		fput(obj->filp);
 814
 815	drm_gem_free_mmap_offset(obj);
 816}
 817EXPORT_SYMBOL(drm_gem_object_release);
 818
 819/**
 820 * drm_gem_object_free - free a GEM object
 821 * @kref: kref of the object to free
 822 *
 823 * Called after the last reference to the object has been lost.
 824 * Must be called holding &drm_device.struct_mutex.
 825 *
 826 * Frees the object
 827 */
 828void
 829drm_gem_object_free(struct kref *kref)
 830{
 831	struct drm_gem_object *obj =
 832		container_of(kref, struct drm_gem_object, refcount);
 833	struct drm_device *dev = obj->dev;
 834
 835	if (dev->driver->gem_free_object_unlocked) {
 836		dev->driver->gem_free_object_unlocked(obj);
 837	} else if (dev->driver->gem_free_object) {
 838		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 839
 840		dev->driver->gem_free_object(obj);
 841	}
 842}
 843EXPORT_SYMBOL(drm_gem_object_free);
 844
 845/**
 846 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 847 * @obj: GEM buffer object
 848 *
 849 * This releases a reference to @obj. Callers must not hold the
 850 * &drm_device.struct_mutex lock when calling this function.
 851 *
 852 * See also drm_gem_object_put().
 853 */
 854void
 855drm_gem_object_put_unlocked(struct drm_gem_object *obj)
 856{
 857	struct drm_device *dev;
 858
 859	if (!obj)
 860		return;
 861
 862	dev = obj->dev;
 863
 864	if (dev->driver->gem_free_object_unlocked) {
 865		kref_put(&obj->refcount, drm_gem_object_free);
 866	} else {
 867		might_lock(&dev->struct_mutex);
 868		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
 869				&dev->struct_mutex))
 870			mutex_unlock(&dev->struct_mutex);
 871	}
 872}
 873EXPORT_SYMBOL(drm_gem_object_put_unlocked);
 874
 875/**
 876 * drm_gem_object_put - release a GEM buffer object reference
 877 * @obj: GEM buffer object
 878 *
 879 * This releases a reference to @obj. Callers must hold the
 880 * &drm_device.struct_mutex lock when calling this function, even when the
 881 * driver doesn't use &drm_device.struct_mutex for anything.
 882 *
 883 * For drivers not encumbered with legacy locking use
 884 * drm_gem_object_put_unlocked() instead.
 885 */
 886void
 887drm_gem_object_put(struct drm_gem_object *obj)
 888{
 889	if (obj) {
 890		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 891
 892		kref_put(&obj->refcount, drm_gem_object_free);
 893	}
 894}
 895EXPORT_SYMBOL(drm_gem_object_put);
 896
 897/**
 898 * drm_gem_vm_open - vma->ops->open implementation for GEM
 899 * @vma: VM area structure
 900 *
 901 * This function implements the #vm_operations_struct open() callback for GEM
 902 * drivers. This must be used together with drm_gem_vm_close().
 903 */
 904void drm_gem_vm_open(struct vm_area_struct *vma)
 905{
 906	struct drm_gem_object *obj = vma->vm_private_data;
 907
 908	drm_gem_object_get(obj);
 909}
 910EXPORT_SYMBOL(drm_gem_vm_open);
 911
 912/**
 913 * drm_gem_vm_close - vma->ops->close implementation for GEM
 914 * @vma: VM area structure
 915 *
 916 * This function implements the #vm_operations_struct close() callback for GEM
 917 * drivers. This must be used together with drm_gem_vm_open().
 918 */
 919void drm_gem_vm_close(struct vm_area_struct *vma)
 920{
 921	struct drm_gem_object *obj = vma->vm_private_data;
 922
 923	drm_gem_object_put_unlocked(obj);
 924}
 925EXPORT_SYMBOL(drm_gem_vm_close);
 926
 927/**
 928 * drm_gem_mmap_obj - memory map a GEM object
 929 * @obj: the GEM object to map
 930 * @obj_size: the object size to be mapped, in bytes
 931 * @vma: VMA for the area to be mapped
 932 *
 933 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 934 * provided by the driver. Depending on their requirements, drivers can either
 935 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 936 * the object will be trapped, to perform migration, GTT binding, surface
 937 * register allocation, or performance monitoring), or mmap the buffer memory
 938 * synchronously after calling drm_gem_mmap_obj.
 939 *
 940 * This function is mainly intended to implement the DMABUF mmap operation, when
 941 * the GEM object is not looked up based on its fake offset. To implement the
 942 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 943 *
 944 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 945 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 946 * callers must verify access restrictions before calling this helper.
 947 *
 948 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 949 * size, or if no gem_vm_ops are provided.
 950 */
 951int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 952		     struct vm_area_struct *vma)
 953{
 954	struct drm_device *dev = obj->dev;
 955
 956	/* Check for valid size. */
 957	if (obj_size < vma->vm_end - vma->vm_start)
 958		return -EINVAL;
 959
 960	if (!dev->driver->gem_vm_ops)
 961		return -EINVAL;
 962
 963	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 964	vma->vm_ops = dev->driver->gem_vm_ops;
 965	vma->vm_private_data = obj;
 966	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 967	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 968
 969	/* Take a ref for this mapping of the object, so that the fault
 970	 * handler can dereference the mmap offset's pointer to the object.
 971	 * This reference is cleaned up by the corresponding vm_close
 972	 * (which should happen whether the vma was created by this call, or
 973	 * by a vm_open due to mremap or partial unmap or whatever).
 974	 */
 975	drm_gem_object_get(obj);
 976
 977	return 0;
 978}
 979EXPORT_SYMBOL(drm_gem_mmap_obj);
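
The dma-buf case called out above reduces to a short forward in the exporter (my_gem_prime_mmap is a hypothetical name; per the comment above, access checks are the caller's responsibility):

static int my_gem_prime_mmap(struct dma_buf *dma_buf,
			     struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* The exporter already vetted access, so map the whole object. */
	return drm_gem_mmap_obj(obj, obj->size, vma);
}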
 980
 981/**
 982 * drm_gem_mmap - memory map routine for GEM objects
 983 * @filp: DRM file pointer
 984 * @vma: VMA for the area to be mapped
 985 *
 986 * If a driver supports GEM object mapping, mmap calls on the DRM file
 987 * descriptor will end up here.
 988 *
 989 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 990 * contain the fake offset we created when the GTT map ioctl was called on
 991 * the object) and map it with a call to drm_gem_mmap_obj().
 992 *
 993 * If the caller is not granted access to the buffer object, the mmap will fail
 994 * with EACCES. Please see the vma manager for more information.
 995 */
 996int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 997{
 998	struct drm_file *priv = filp->private_data;
 999	struct drm_device *dev = priv->minor->dev;
1000	struct drm_gem_object *obj = NULL;
1001	struct drm_vma_offset_node *node;
1002	int ret;
1003
1004	if (drm_dev_is_unplugged(dev))
1005		return -ENODEV;
1006
1007	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1008	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1009						  vma->vm_pgoff,
1010						  vma_pages(vma));
1011	if (likely(node)) {
1012		obj = container_of(node, struct drm_gem_object, vma_node);
1013		/*
1014		 * When the object is being freed, after it hits 0-refcnt it
1015		 * proceeds to tear down the object. In the process it will
1016		 * attempt to remove the VMA offset and so acquire this
1017		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1018		 * that matches our range, we know it is in the process of being
1019		 * destroyed and will be freed as soon as we release the lock -
1020		 * so we have to check for the 0-refcnted object and treat it as
1021		 * invalid.
1022		 */
1023		if (!kref_get_unless_zero(&obj->refcount))
1024			obj = NULL;
1025	}
1026	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1027
1028	if (!obj)
1029		return -EINVAL;
1030
1031	if (!drm_vma_node_is_allowed(node, priv)) {
1032		drm_gem_object_put_unlocked(obj);
1033		return -EACCES;
1034	}
1035
1036	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1037			       vma);
1038
1039	drm_gem_object_put_unlocked(obj);
1040
1041	return ret;
1042}
1043EXPORT_SYMBOL(drm_gem_mmap);
1044
1045void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1046			const struct drm_gem_object *obj)
1047{
1048	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1049	drm_printf_indent(p, indent, "refcount=%u\n",
1050			  kref_read(&obj->refcount));
1051	drm_printf_indent(p, indent, "start=%08lx\n",
1052			  drm_vma_node_start(&obj->vma_node));
1053	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1054	drm_printf_indent(p, indent, "imported=%s\n",
1055			  obj->import_attach ? "yes" : "no");
1056
1057	if (obj->dev->driver->gem_print_info)
1058		obj->dev->driver->gem_print_info(p, indent, obj);
1059}