// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
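
/*
 * A minimal integration sketch (hypothetical driver code, not part of this
 * file): drivers that rely entirely on these helpers can hook them up via
 * the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h>:
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */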

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
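
/*
 * A minimal usage sketch (hypothetical driver code): allocate a buffer and
 * drop the reference when done. The helper returns an ERR_PTR()-encoded
 * pointer, never NULL.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *	...
 *	drm_gem_object_put(&shmem->base);
 */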

/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
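
/*
 * A hedged usage sketch (hypothetical driver code): keep the pages pinned
 * while the hardware may access the buffer. drm_gem_shmem_pin() takes the
 * reservation lock internally, so it must not already be held here.
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *	... program the hardware, wait for it to finish ...
 *	drm_gem_shmem_unpin(shmem);
 */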

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
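
/*
 * A hedged usage sketch (hypothetical driver code): map the buffer for CPU
 * access and unmap it again. For natively allocated objects the reservation
 * lock must be held across both calls; this sketch assumes the caller does
 * not hold it yet.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (!ret) {
 *		memset(map.vaddr, 0, shmem->base.size);
 *		drm_gem_shmem_vunmap(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */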

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an ID in the IDR table, where the object is registered;
	 * the returned handle is the ID that userspace uses to look up
	 * the object.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update the madvise status. Returns true if the buffer has not been
 * purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible back
	 * to the system, as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
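
/*
 * A hedged usage sketch (hypothetical driver code): once userspace has
 * marked a buffer purgeable through a driver-specific madvise ioctl, a
 * memory shrinker may reclaim its backing pages under the reservation lock.
 *
 *	madvise ioctl path:
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	drm_gem_shmem_madvise(shmem, 1);
 *	dma_resv_unlock(shmem->base.resv);
 *
 *	shrinker path:
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge(shmem);
 *	dma_resv_unlock(shmem->base.resv);
 */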

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
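
/*
 * A worked example of the computation above (illustrative numbers only):
 * for a 1024x768 dumb buffer at 32 bpp,
 * min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes and
 * size = PAGE_ALIGN(4096 * 768) = 3145728 bytes (already page-aligned
 * with 4 KiB pages).
 */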

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * the differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
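
/*
 * A hedged usage sketch (hypothetical driver code): fetch the dma-mapped
 * scatter/gather table and hand each DMA segment to the hardware.
 * my_hw_program_segment() is a made-up driver function.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		my_hw_program_segment(sg_dma_address(sg), sg_dma_len(sg));
 */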

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");