/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
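
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * embeds struct drm_gem_object in its own buffer type allocates the wrapper
 * and hands the GEM core a page-aligned size. "foo_bo" and "foo_bo_create"
 * are hypothetical names.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		struct page **pages;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */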

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
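
/*
 * Usage sketch (illustrative only): drivers whose buffers are not backed by
 * shmem -- e.g. VRAM-backed or imported dma-buf objects -- call this instead
 * of drm_gem_object_init(). The hypothetical import helper below assumes the
 * exporter's size is page-aligned, as the BUG_ON above requires.
 *
 *	static struct drm_gem_object *foo_gem_import(struct drm_device *dev,
 *						     struct dma_buf *dma_buf)
 *	{
 *		struct foo_bo *bo;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		drm_gem_private_object_init(dev, &bo->base, dma_buf->size);
 *		return &bo->base;	(backing store stays with the exporter)
 *	}
 */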

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
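
/*
 * Usage sketch (illustrative only): a typical &drm_driver.dumb_create
 * implementation creates the object, publishes it with
 * drm_gem_handle_create() as its last step, and then drops its own
 * reference -- from that point the handle keeps the object alive.
 * "foo_bo_create" is the hypothetical helper sketched earlier.
 *
 *	static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file, &bo->base, &args->handle);
 *		drm_gem_object_put_unlocked(&bo->base);
 *		return ret;
 *	}
 */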

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
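
/*
 * Usage sketch (illustrative only): drivers typically pair
 * drm_gem_get_pages() with drm_gem_put_pages(), keeping the pages pinned
 * for as long as the buffer is mapped into the device. "foo_bo" is the
 * hypothetical wrapper from the earlier sketches.
 *
 *	static int foo_bo_pin_pages(struct foo_bo *bo)
 *	{
 *		struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_bo_unpin_pages(struct foo_bo *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *		bo->pages = NULL;
 *	}
 */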

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
	*objs_out = objs;

out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
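
/*
 * Usage sketch (illustrative only): a submit ioctl that receives an array
 * of handles from userspace can resolve them all in one call. The "args"
 * structure and its fields are hypothetical.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *	// ... use the objects ...
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put_unlocked(objs[i]);
 *	kvfree(objs);
 */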

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation object's shared
 * and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * -EINVAL if the handle is invalid, -ERESTARTSYS if interrupted, -ETIME if
 * the wait timed out, or 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
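
/*
 * Usage sketch (illustrative only): a driver's wait-BO ioctl can be a thin
 * wrapper around this helper. The "drm_foo_wait_bo" args structure is
 * hypothetical; the ns-to-jiffies conversion is one plausible choice.
 *
 *	static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
 *				     struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait_bo *args = data;
 *		unsigned long timeout = nsecs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */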

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
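
/*
 * Usage sketch (illustrative only): from userspace, the flink/open pair
 * shares a buffer between two processes. Error handling is omitted; "fd"
 * and "fd2" are DRM device file descriptors in the two processes.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// pass flink.name to the other process, which then does:
 *	struct drm_gem_open open_args = { .name = name };
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &open_args);
 *	// open_args.handle and open_args.size are now valid
 */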

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				   &dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (obj->funcs && obj->funcs->vm_ops)
		vma->vm_ops = obj->funcs->vm_ops;
	else if (dev->driver->gem_vm_ops)
		vma->vm_ops = dev->driver->gem_vm_ops;
	else
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
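
/*
 * Usage sketch (illustrative only): a GEM exporter's dma-buf .mmap callback
 * can forward straight to drm_gem_mmap_obj(), since dma-buf has already
 * vetted access to the buffer. "foo_gem_dmabuf_mmap" is a hypothetical name.
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */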

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
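
/*
 * Usage sketch (illustrative only): drivers normally reach this entry point
 * by wiring drm_gem_mmap into their &file_operations, most simply via the
 * DEFINE_DRM_GEM_FOPS() helper from <drm/drm_gem.h>:
 *
 *	DEFINE_DRM_GEM_FOPS(foo_driver_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_driver_fops,
 *	};
 */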

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);
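
/*
 * Usage sketch (illustrative only): job submission brackets its fence
 * bookkeeping with the lock/unlock pair. "job" and its fields are
 * hypothetical.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *	// queue the job, add its done-fence to each BO's reservation
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 */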

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
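
/*
 * Usage sketch (illustrative only): while the reservations are held, a
 * driver gathers the implicit dependencies of every BO touched by a job
 * into an xarray of fences. "job" and its fields are hypothetical; the
 * xarray must be set up for ID allocation since drm_gem_fence_array_add()
 * uses xa_alloc().
 *
 *	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps,
 *						       job->bos[i],
 *						       job->bo_writes[i]);
 *		if (ret)
 *			goto fail;
 *	}
 */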
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <linux/dma-buf.h>
29#include <linux/file.h>
30#include <linux/fs.h>
31#include <linux/iosys-map.h>
32#include <linux/mem_encrypt.h>
33#include <linux/mm.h>
34#include <linux/mman.h>
35#include <linux/module.h>
36#include <linux/pagemap.h>
37#include <linux/pagevec.h>
38#include <linux/shmem_fs.h>
39#include <linux/slab.h>
40#include <linux/string_helpers.h>
41#include <linux/types.h>
42#include <linux/uaccess.h>
43
44#include <drm/drm.h>
45#include <drm/drm_device.h>
46#include <drm/drm_drv.h>
47#include <drm/drm_file.h>
48#include <drm/drm_gem.h>
49#include <drm/drm_managed.h>
50#include <drm/drm_print.h>
51#include <drm/drm_vma_manager.h>
52
53#include "drm_internal.h"
54
55/** @file drm_gem.c
56 *
57 * This file provides some of the base ioctls and library routines for
58 * the graphics memory manager implemented by each device driver.
59 *
60 * Because various devices have different requirements in terms of
61 * synchronization and migration strategies, implementing that is left up to
62 * the driver, and all that the general API provides should be generic --
63 * allocating objects, reading/writing data with the cpu, freeing objects.
64 * Even there, platform-dependent optimizations for reading/writing data with
65 * the CPU mean we'll likely hook those out to driver-specific calls. However,
66 * the DRI2 implementation wants to have at least allocate/mmap be generic.
67 *
68 * The goal was to have swap-backed object allocation managed through
69 * struct file. However, file descriptors as handles to a struct file have
70 * two major failings:
71 * - Process limits prevent more than 1024 or so being used at a time by
72 * default.
73 * - Inability to allocate high fds will aggravate the X Server's select()
74 * handling, and likely that of many GL client applications as well.
75 *
76 * This led to a plan of using our own integer IDs (called handles, following
77 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
78 * ioctls. The objects themselves will still include the struct file so
79 * that we can transition to fds if the required kernel infrastructure shows
80 * up at a later date, and as our interface with shmfs for memory allocation.
81 */
82
83static void
84drm_gem_init_release(struct drm_device *dev, void *ptr)
85{
86 drm_vma_offset_manager_destroy(dev->vma_offset_manager);
87}
88
89/**
90 * drm_gem_init - Initialize the GEM device fields
91 * @dev: drm_devic structure to initialize
92 */
93int
94drm_gem_init(struct drm_device *dev)
95{
96 struct drm_vma_offset_manager *vma_offset_manager;
97
98 mutex_init(&dev->object_name_lock);
99 idr_init_base(&dev->object_name_idr, 1);
100
101 vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
102 GFP_KERNEL);
103 if (!vma_offset_manager) {
104 DRM_ERROR("out of memory\n");
105 return -ENOMEM;
106 }
107
108 dev->vma_offset_manager = vma_offset_manager;
109 drm_vma_offset_manager_init(vma_offset_manager,
110 DRM_FILE_PAGE_OFFSET_START,
111 DRM_FILE_PAGE_OFFSET_SIZE);
112
113 return drmm_add_action(dev, drm_gem_init_release, NULL);
114}
115
116/**
117 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
118 * @dev: drm_device the object should be initialized for
119 * @obj: drm_gem_object to initialize
120 * @size: object size
121 *
122 * Initialize an already allocated GEM object of the specified size with
123 * shmfs backing store.
124 */
125int drm_gem_object_init(struct drm_device *dev,
126 struct drm_gem_object *obj, size_t size)
127{
128 struct file *filp;
129
130 drm_gem_private_object_init(dev, obj, size);
131
132 filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
133 if (IS_ERR(filp))
134 return PTR_ERR(filp);
135
136 obj->filp = filp;
137
138 return 0;
139}
140EXPORT_SYMBOL(drm_gem_object_init);
141
142/**
143 * drm_gem_private_object_init - initialize an allocated private GEM object
144 * @dev: drm_device the object should be initialized for
145 * @obj: drm_gem_object to initialize
146 * @size: object size
147 *
148 * Initialize an already allocated GEM object of the specified size with
149 * no GEM provided backing store. Instead the caller is responsible for
150 * backing the object and handling it.
151 */
152void drm_gem_private_object_init(struct drm_device *dev,
153 struct drm_gem_object *obj, size_t size)
154{
155 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
156
157 obj->dev = dev;
158 obj->filp = NULL;
159
160 kref_init(&obj->refcount);
161 obj->handle_count = 0;
162 obj->size = size;
163 dma_resv_init(&obj->_resv);
164 if (!obj->resv)
165 obj->resv = &obj->_resv;
166
167 drm_vma_node_reset(&obj->vma_node);
168 INIT_LIST_HEAD(&obj->lru_node);
169}
170EXPORT_SYMBOL(drm_gem_private_object_init);
171
172/**
173 * drm_gem_object_handle_free - release resources bound to userspace handles
174 * @obj: GEM object to clean up.
175 *
176 * Called after the last handle to the object has been closed
177 *
178 * Removes any name for the object. Note that this must be
179 * called before drm_gem_object_free or we'll be touching
180 * freed memory
181 */
182static void drm_gem_object_handle_free(struct drm_gem_object *obj)
183{
184 struct drm_device *dev = obj->dev;
185
186 /* Remove any name for this object */
187 if (obj->name) {
188 idr_remove(&dev->object_name_idr, obj->name);
189 obj->name = 0;
190 }
191}
192
193static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
194{
195 /* Unbreak the reference cycle if we have an exported dma_buf. */
196 if (obj->dma_buf) {
197 dma_buf_put(obj->dma_buf);
198 obj->dma_buf = NULL;
199 }
200}
201
202static void
203drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
204{
205 struct drm_device *dev = obj->dev;
206 bool final = false;
207
208 if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
209 return;
210
211 /*
212 * Must bump handle count first as this may be the last
213 * ref, in which case the object would disappear before we
214 * checked for a name
215 */
216
217 mutex_lock(&dev->object_name_lock);
218 if (--obj->handle_count == 0) {
219 drm_gem_object_handle_free(obj);
220 drm_gem_object_exported_dma_buf_free(obj);
221 final = true;
222 }
223 mutex_unlock(&dev->object_name_lock);
224
225 if (final)
226 drm_gem_object_put(obj);
227}
228
229/*
230 * Called at device or object close to release the file's
231 * handle references on objects.
232 */
233static int
234drm_gem_object_release_handle(int id, void *ptr, void *data)
235{
236 struct drm_file *file_priv = data;
237 struct drm_gem_object *obj = ptr;
238
239 if (obj->funcs->close)
240 obj->funcs->close(obj, file_priv);
241
242 drm_prime_remove_buf_handle(&file_priv->prime, id);
243 drm_vma_node_revoke(&obj->vma_node, file_priv);
244
245 drm_gem_object_handle_put_unlocked(obj);
246
247 return 0;
248}
249
250/**
251 * drm_gem_handle_delete - deletes the given file-private handle
252 * @filp: drm file-private structure to use for the handle look up
253 * @handle: userspace handle to delete
254 *
255 * Removes the GEM handle from the @filp lookup table which has been added with
256 * drm_gem_handle_create(). If this is the last handle also cleans up linked
257 * resources like GEM names.
258 */
259int
260drm_gem_handle_delete(struct drm_file *filp, u32 handle)
261{
262 struct drm_gem_object *obj;
263
264 spin_lock(&filp->table_lock);
265
266 /* Check if we currently have a reference on the object */
267 obj = idr_replace(&filp->object_idr, NULL, handle);
268 spin_unlock(&filp->table_lock);
269 if (IS_ERR_OR_NULL(obj))
270 return -EINVAL;
271
272 /* Release driver's reference and decrement refcount. */
273 drm_gem_object_release_handle(handle, obj, filp);
274
275 /* And finally make the handle available for future allocations. */
276 spin_lock(&filp->table_lock);
277 idr_remove(&filp->object_idr, handle);
278 spin_unlock(&filp->table_lock);
279
280 return 0;
281}
282EXPORT_SYMBOL(drm_gem_handle_delete);
283
284/**
285 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
286 * @file: drm file-private structure containing the gem object
287 * @dev: corresponding drm_device
288 * @handle: gem object handle
289 * @offset: return location for the fake mmap offset
290 *
291 * This implements the &drm_driver.dumb_map_offset kms driver callback for
292 * drivers which use gem to manage their backing storage.
293 *
294 * Returns:
295 * 0 on success or a negative error code on failure.
296 */
297int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
298 u32 handle, u64 *offset)
299{
300 struct drm_gem_object *obj;
301 int ret;
302
303 obj = drm_gem_object_lookup(file, handle);
304 if (!obj)
305 return -ENOENT;
306
307 /* Don't allow imported objects to be mapped */
308 if (obj->import_attach) {
309 ret = -EINVAL;
310 goto out;
311 }
312
313 ret = drm_gem_create_mmap_offset(obj);
314 if (ret)
315 goto out;
316
317 *offset = drm_vma_node_offset_addr(&obj->vma_node);
318out:
319 drm_gem_object_put(obj);
320
321 return ret;
322}
323EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
324
325int drm_gem_dumb_destroy(struct drm_file *file,
326 struct drm_device *dev,
327 u32 handle)
328{
329 return drm_gem_handle_delete(file, handle);
330}
331
332/**
333 * drm_gem_handle_create_tail - internal functions to create a handle
334 * @file_priv: drm file-private structure to register the handle for
335 * @obj: object to register
336 * @handlep: pointer to return the created handle to the caller
337 *
338 * This expects the &drm_device.object_name_lock to be held already and will
339 * drop it before returning. Used to avoid races in establishing new handles
340 * when importing an object from either an flink name or a dma-buf.
341 *
342 * Handles must be release again through drm_gem_handle_delete(). This is done
343 * when userspace closes @file_priv for all attached handles, or through the
344 * GEM_CLOSE ioctl for individual handles.
345 */
346int
347drm_gem_handle_create_tail(struct drm_file *file_priv,
348 struct drm_gem_object *obj,
349 u32 *handlep)
350{
351 struct drm_device *dev = obj->dev;
352 u32 handle;
353 int ret;
354
355 WARN_ON(!mutex_is_locked(&dev->object_name_lock));
356 if (obj->handle_count++ == 0)
357 drm_gem_object_get(obj);
358
359 /*
360 * Get the user-visible handle using idr. Preload and perform
361 * allocation under our spinlock.
362 */
363 idr_preload(GFP_KERNEL);
364 spin_lock(&file_priv->table_lock);
365
366 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
367
368 spin_unlock(&file_priv->table_lock);
369 idr_preload_end();
370
371 mutex_unlock(&dev->object_name_lock);
372 if (ret < 0)
373 goto err_unref;
374
375 handle = ret;
376
377 ret = drm_vma_node_allow(&obj->vma_node, file_priv);
378 if (ret)
379 goto err_remove;
380
381 if (obj->funcs->open) {
382 ret = obj->funcs->open(obj, file_priv);
383 if (ret)
384 goto err_revoke;
385 }
386
387 *handlep = handle;
388 return 0;
389
390err_revoke:
391 drm_vma_node_revoke(&obj->vma_node, file_priv);
392err_remove:
393 spin_lock(&file_priv->table_lock);
394 idr_remove(&file_priv->object_idr, handle);
395 spin_unlock(&file_priv->table_lock);
396err_unref:
397 drm_gem_object_handle_put_unlocked(obj);
398 return ret;
399}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * drop their reference to the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point; drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
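
/*
 * Example (hedged sketch): a typical buffer-creation callback publishes the
 * object as its very last step and then drops its own reference, since the
 * new handle now keeps the object alive. "mydrv_create_object()" is a
 * hypothetical driver allocator:
 *
 *     int mydrv_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 *                           struct drm_mode_create_dumb *args)
 *     {
 *             struct drm_gem_object *obj = mydrv_create_object(dev, args);
 *             int ret;
 *
 *             if (IS_ERR(obj))
 *                     return PTR_ERR(obj);
 *
 *             ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *             drm_gem_object_put(obj);
 *             return ret;
 *     }
 */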
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
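
/*
 * Example (hedged sketch of the userspace side): once a fake offset has been
 * handed out, e.g. through &drm_driver.dumb_map_offset, userspace passes it
 * to mmap(2) on the DRM fd to map the buffer:
 *
 *     struct drm_mode_map_dumb map = { .handle = handle };
 *
 *     ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *     ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                drm_fd, map.offset);
 */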

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
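
/*
 * Example (hedged sketch): a shmem-backed driver would typically pair
 * drm_gem_get_pages() with drm_gem_put_pages() around the time the buffer is
 * bound to and unbound from the device. "mydrv_bo" and its "pages" field are
 * hypothetical:
 *
 *     static int mydrv_pin_pages(struct mydrv_bo *bo)
 *     {
 *             struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *             if (IS_ERR(pages))
 *                     return PTR_ERR(pages);
 *             bo->pages = pages;
 *             return 0;
 *     }
 *
 *     static void mydrv_unpin_pages(struct mydrv_bo *bo)
 *     {
 *             drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *             bo->pages = NULL;
 *     }
 */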

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
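
/*
 * Example (hedged sketch): a submit ioctl that receives a user pointer to an
 * array of handles can resolve them all in one call; the "args" fields are
 * hypothetical:
 *
 *     struct drm_gem_object **objs;
 *     int i, ret;
 *
 *     ret = drm_gem_objects_lookup(file_priv,
 *                                  u64_to_user_ptr(args->bo_handles),
 *                                  args->bo_count, &objs);
 *     if (ret)
 *             return ret;
 *     ...
 *     for (i = 0; i < args->bo_count; i++)
 *             drm_gem_object_put(objs[i]);
 *     kvfree(objs);
 */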

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation's shared
 * and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
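
/*
 * Example (hedged sketch): a driver's wait-bo ioctl reduces to a thin wrapper
 * around this helper; the "mydrv_wait" args struct and its fields are
 * hypothetical:
 *
 *     int mydrv_wait_ioctl(struct drm_device *dev, void *data,
 *                          struct drm_file *file_priv)
 *     {
 *             struct mydrv_wait *args = data;
 *             unsigned long timeout = usecs_to_jiffies(args->timeout_us);
 *
 *             return drm_gem_dma_resv_wait(file_priv, args->handle,
 *                                          true, timeout);
 *     }
 */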

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	return drm_gem_handle_delete(file_priv, args->handle);
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}
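
/*
 * Example (hedged sketch of the userspace side): flink names let two DRM
 * file descriptors share one buffer. One client publishes a global name,
 * the other opens it (note that flink names are guessable, which is why
 * dma-buf is preferred for new code):
 *
 *     struct drm_gem_flink flink = { .handle = handle };
 *     struct drm_gem_open open_args = {};
 *
 *     ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *     open_args.name = flink.name;
 *     ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_args);
 *     // open_args.handle and open_args.size describe the object on fd_b
 */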

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success, or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
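
/*
 * Example (hedged sketch): a dma-buf exporter whose &dma_buf_ops.mmap simply
 * forwards to this helper, assuming the dma-buf's priv pointer is the GEM
 * object (as set up by the DRM PRIME helpers):
 *
 *     static int mydrv_dmabuf_mmap(struct dma_buf *dma_buf,
 *                                  struct vm_area_struct *vma)
 *     {
 *             struct drm_gem_object *obj = dma_buf->priv;
 *
 *             return drm_gem_mmap_obj(obj, obj->size, vma);
 *     }
 */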

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
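
/*
 * Example (hedged sketch): drivers usually reach this entry point by wiring
 * drm_gem_mmap() into their &struct file_operations, either directly or via
 * the DEFINE_DRM_GEM_FOPS() convenience macro:
 *
 *     DEFINE_DRM_GEM_FOPS(mydrv_fops);
 *
 *     static const struct drm_driver mydrv_driver = {
 *             .fops = &mydrv_fops,
 *             ...
 *     };
 */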

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);

void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
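
/*
 * Example (hedged sketch): CPU access through a vmap'ed kernel mapping,
 * using the unlocked variants so the helpers take the reservation lock
 * themselves:
 *
 *     struct iosys_map map;
 *     int ret;
 *
 *     ret = drm_gem_vmap_unlocked(obj, &map);
 *     if (ret)
 *             return ret;
 *
 *     iosys_map_memset(&map, 0, 0, obj->size);   // e.g. clear the buffer
 *
 *     drm_gem_vunmap_unlocked(obj, &map);
 */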

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
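
/*
 * Example (hedged sketch): a typical job-submission path locks every buffer,
 * reserves a fence slot, publishes the job's fence and unlocks again. The
 * dma_resv calls are the usual reservation APIs; "job_fence" is hypothetical:
 *
 *     struct ww_acquire_ctx ctx;
 *     int i, ret;
 *
 *     ret = drm_gem_lock_reservations(objs, count, &ctx);
 *     if (ret)
 *             return ret;
 *
 *     for (i = 0; i < count; i++) {
 *             ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *             if (ret)
 *                     break;
 *             dma_resv_add_fence(objs[i]->resv, job_fence,
 *                                DMA_RESV_USAGE_WRITE);
 *     }
 *
 *     drm_gem_unlock_reservations(objs, count, &ctx);
 */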

/**
 * drm_gem_lru_init - initialize an LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

static void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be a trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!dma_resv_trylock(obj->resv))
			goto tail;

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry(obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
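
/*
 * Example (hedged sketch): wiring drm_gem_lru_scan() into a shrinker's
 * scan_objects hook. "mydrv_shrink" would be the driver callback that drops
 * the object's backing pages and moves it out of the LRU; all "mydrv" names
 * are hypothetical:
 *
 *     static unsigned long
 *     mydrv_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *     {
 *             struct mydrv_device *mydev =
 *                     container_of(shrinker, struct mydrv_device, shrinker);
 *             unsigned long freed;
 *
 *             freed = drm_gem_lru_scan(&mydev->lru, sc->nr_to_scan,
 *                                      mydrv_shrink);
 *
 *             return freed ?: SHRINK_STOP;
 *     }
 */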