// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For the GEM callback helpers used in struct &drm_gem_object_funcs, see the
 * likewise-named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap()
 * wraps drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

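/*
 * Example: a minimal sketch of how a driver might wire up these helpers,
 * assuming a hypothetical driver "foo". DRM_GEM_SHMEM_DRIVER_OPS fills in
 * &drm_driver.dumb_create and &drm_driver.gem_prime_import_sg_table for
 * shmem-backed objects:
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */
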
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

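/*
 * Example: a minimal sketch of allocating a buffer in a hypothetical driver;
 * dropping the reference with drm_gem_object_put() once the buffer is no
 * longer needed is the caller's responsibility:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */
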
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}

static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

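/*
 * Example: a minimal sketch of keeping a buffer resident around a DMA
 * operation in a hypothetical driver, where foo_hw_dma_to_buffer() stands in
 * for the actual hardware access; pin/unpin take and drop the object's
 * reservation lock internally:
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *	foo_hw_dma_to_buffer(foo, shmem);
 *	drm_gem_shmem_unpin(shmem);
 */
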
/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

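/*
 * Example: a minimal sketch of mapping a natively allocated object into the
 * kernel's address space; for such objects the caller must hold the
 * reservation lock, and the same iosys_map cookie is handed back to vunmap:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (!ret) {
 *		memset(map.vaddr, 0, shmem->base.size);
 *		drm_gem_shmem_vunmap(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */
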
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

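/*
 * Example: a minimal sketch of an ioctl handler marking a buffer as
 * purgeable (madv=1 meaning "don't need", as in drivers such as panfrost);
 * the reservation lock must be held across the call:
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	args->retained = drm_gem_shmem_madvise(shmem, args->madv);
 *	dma_resv_unlock(shmem->base.resv);
 */
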
void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible back
	 * to the system, as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

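/*
 * Example: a minimal sketch of a driver shrinker scan callback purging one
 * object; the reservation lock is only tried, never waited on, since this
 * may run deep in memory reclaim:
 *
 *	if (dma_resv_trylock(shmem->base.resv)) {
 *		if (drm_gem_shmem_is_purgeable(shmem))
 *			drm_gem_shmem_purge(shmem);
 *		dma_resv_unlock(shmem->base.resv);
 *	}
 */
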
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

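/*
 * Worked example of the pitch and size computation above: for a 1920x1080
 * XRGB8888 buffer (bpp = 32), min_pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680
 * bytes, and size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which happens
 * to already be a multiple of the 4 KiB page size.
 */
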
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

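/*
 * Example: a minimal sketch of programming hardware with the DMA addresses
 * of a buffer, where foo_hw_queue_chunk() stands in for the actual register
 * or descriptor writes in a hypothetical driver:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_hw_queue_chunk(foo, sg_dma_address(sg), sg_dma_len(sg));
 */
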
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");