v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

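/*
 * Editor's illustrative sketch, not part of the upstream file: a driver
 * that is happy with these defaults can wire the helpers up wholesale via
 * DRM_GEM_SHMEM_DRIVER_OPS from <drm/drm_gem_shmem_helper.h>. All "mydrv"
 * names are hypothetical.
 */
static const struct drm_driver mydrv_example_driver __maybe_unused = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_SHMEM_DRIVER_OPS,	/* dumb_create + PRIME import callbacks */
	.name = "mydrv",
	.desc = "hypothetical shmem-backed driver",
};
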
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

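/*
 * Editor's sketch, not part of the upstream file: a typical native
 * allocation path in a driver. mydrv_create_bo() is hypothetical.
 */
static int __maybe_unused mydrv_create_bo(struct drm_device *dev, size_t size,
					  struct drm_gem_shmem_object **out)
{
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size); /* size is PAGE_ALIGN()ed internally */
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	*out = shmem;
	return 0;
}
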
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}

static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

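/*
 * Editor's sketch, not part of the upstream file: keeping the backing
 * pages resident across a DMA operation. The hardware programming in the
 * middle is left out; mydrv_dma_example() is hypothetical.
 */
static int __maybe_unused mydrv_dma_example(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_pin(shmem);	/* takes the reservation lock itself */
	if (ret)
		return ret;

	/* ... program the hardware and wait for completion ... */

	drm_gem_shmem_unpin(shmem);

	return 0;
}
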
/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

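/*
 * Editor's sketch, not part of the upstream file: CPU access through a
 * temporary kernel mapping. For a natively allocated object the caller
 * must hold the reservation lock around vmap/vunmap, as asserted above.
 */
static int __maybe_unused mydrv_clear_bo(struct drm_gem_shmem_object *shmem)
{
	struct iosys_map map;
	int ret;

	dma_resv_lock(shmem->base.resv, NULL);

	ret = drm_gem_shmem_vmap(shmem, &map);
	if (!ret) {
		iosys_map_memset(&map, 0, 0, shmem->base.size);	/* zero the buffer */
		drm_gem_shmem_vunmap(shmem, &map);
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}
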
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update the madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible back
	 * to the system, as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

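/*
 * Editor's sketch, not part of the upstream file: how a driver shrinker
 * might use the two functions above to drop a buffer that userspace has
 * marked as unneeded via its madvise-style ioctl. The caller is expected
 * to hold the reservation lock; mydrv_try_purge() is hypothetical.
 */
static bool __maybe_unused mydrv_try_purge(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (!drm_gem_shmem_is_purgeable(shmem))
		return false;

	drm_gem_shmem_purge(shmem);

	return true;
}
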
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to a
 * whole number of bytes. Drivers for hardware that doesn't have any
 * additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

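/*
 * Editor's sketch, not part of the upstream file: hardware with a pitch
 * constraint can fix up the ioctl arguments before calling the helper.
 * The 64-byte alignment is a made-up example value.
 */
static int __maybe_unused mydrv_dumb_create(struct drm_file *file,
					    struct drm_device *dev,
					    struct drm_mode_create_dumb *args)
{
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	return drm_gem_shmem_dumb_create(file, dev, args);
}
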
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd; vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (i.e. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

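/*
 * Editor's note, not part of the upstream file: drm_gem_shmem_mmap() is
 * normally reached through the standard GEM file operations, whose .mmap
 * handler (drm_gem_mmap()) looks the object up by its fake offset and then
 * calls back through &drm_gem_object_funcs.mmap. A minimal sketch:
 */
DEFINE_DRM_GEM_FOPS(mydrv_example_fops);	/* hypothetical; supplies .mmap = drm_gem_mmap */
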
/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

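/*
 * Editor's sketch, not part of the upstream file: walking the dma-mapped
 * scatter/gather table to program a (hypothetical) device MMU.
 */
static int __maybe_unused mydrv_map_bo(struct drm_gem_shmem_object *shmem)
{
	struct scatterlist *sg;
	struct sg_table *sgt;
	unsigned int i;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* mydrv_mmu_map(sg_dma_address(sg), sg_dma_len(sg)); */
	}

	return 0;
}
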
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

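/*
 * Editor's sketch, not part of the upstream file: drivers that do not use
 * the DRM_GEM_SHMEM_DRIVER_OPS shorthand can hook the import path up
 * explicitly. All "mydrv" names are hypothetical.
 */
static const struct drm_driver mydrv_import_example __maybe_unused = {
	.driver_features = DRIVER_GEM,
	.dumb_create = drm_gem_shmem_dumb_create,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.name = "mydrv",
	.desc = "hypothetical importing driver",
};
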
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

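/*
 * Editor's sketch, not part of the upstream file: allocating from a
 * driver-private tmpfs mount, typically one created at probe time with
 * huge-page support enabled so large objects can use THP. How the mount
 * is created is outside this file; mydrv_create_thp_bo() is hypothetical.
 */
static struct drm_gem_shmem_object * __maybe_unused
mydrv_create_thp_bo(struct drm_device *dev, struct vfsmount *gemfs, size_t size)
{
	/* Passing gemfs == NULL falls back to the default shmem mount. */
	return drm_gem_shmem_create_with_mnt(dev, size, gemfs);
}
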
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

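/*
 * Editor's sketch, not part of the upstream file: with the _locked
 * variants exported in this version, a driver already holding the
 * reservation lock can pin without the lock/unlock round trip that
 * drm_gem_shmem_pin() performs. mydrv_pin_locked_example() is hypothetical.
 */
static int __maybe_unused mydrv_pin_locked_example(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_pin_locked(shmem);
	/* ... set up device mappings while the lock is still held ... */
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
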
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update the madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible back
	 * to the system, as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to a
 * whole number of bytes. Drivers for hardware that doesn't have any
 * additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd; vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (i.e. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

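	/*
	 * Editor's note: private copy-on-write mappings make no sense here;
	 * the fault handler inserts PFNs of the shared backing pages
	 * directly, so writes must go through a shared mapping.
	 */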
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");