v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright 2018 Noralf Trønnes
  4 */
  5
  6#include <linux/dma-buf.h>
  7#include <linux/export.h>
  8#include <linux/mutex.h>
  9#include <linux/shmem_fs.h>
 10#include <linux/slab.h>
 11#include <linux/vmalloc.h>
 12
 13#include <drm/drm.h>
 14#include <drm/drm_device.h>
 15#include <drm/drm_drv.h>
 16#include <drm/drm_gem_shmem_helper.h>
 17#include <drm/drm_prime.h>
 18#include <drm/drm_print.h>
 19
 20/**
 21 * DOC: overview
 22 *
 23 * This library provides helpers for GEM objects backed by shmem buffers
 24 * allocated using anonymous pageable memory.
 25 */
 26
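For orientation, a minimal sketch of how a v5.4-era driver could wire these helpers up. The my_fops/my_driver names are hypothetical; the helper names are the ones exported by this file and its header, and DEFINE_DRM_GEM_SHMEM_FOPS() is the macro the mmap documentation below refers to:

	#include <drm/drm_drv.h>
	#include <drm/drm_gem_shmem_helper.h>

	DEFINE_DRM_GEM_SHMEM_FOPS(my_fops);	/* hooks drm_gem_shmem_mmap() */

	static struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		.fops = &my_fops,
		.dumb_create = drm_gem_shmem_dumb_create,
		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	};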
 27static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
 28	.free = drm_gem_shmem_free_object,
 29	.print_info = drm_gem_shmem_print_info,
 30	.pin = drm_gem_shmem_pin,
 31	.unpin = drm_gem_shmem_unpin,
 32	.get_sg_table = drm_gem_shmem_get_sg_table,
 33	.vmap = drm_gem_shmem_vmap,
 34	.vunmap = drm_gem_shmem_vunmap,
 35	.vm_ops = &drm_gem_shmem_vm_ops,
 36};
 37
 38/**
 39 * drm_gem_shmem_create - Allocate an object with the given size
 40 * @dev: DRM device
 41 * @size: Size of the object to allocate
 42 *
 43 * This function creates a shmem GEM object.
 44 *
 45 * Returns:
 46 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 47 * error code on failure.
 48 */
 49struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
 50{
 51	struct drm_gem_shmem_object *shmem;
 52	struct drm_gem_object *obj;
 53	int ret;
 54
 55	size = PAGE_ALIGN(size);
 56
 57	if (dev->driver->gem_create_object)
 58		obj = dev->driver->gem_create_object(dev, size);
 59	else
 60		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
 61	if (!obj)
 62		return ERR_PTR(-ENOMEM);
 63
 64	if (!obj->funcs)
 65		obj->funcs = &drm_gem_shmem_funcs;
 66
 67	ret = drm_gem_object_init(dev, obj, size);
 68	if (ret)
 69		goto err_free;
 70
 71	ret = drm_gem_create_mmap_offset(obj);
 72	if (ret)
 73		goto err_release;
 74
 75	shmem = to_drm_gem_shmem_obj(obj);
 76	mutex_init(&shmem->pages_lock);
 77	mutex_init(&shmem->vmap_lock);
 78	INIT_LIST_HEAD(&shmem->madv_list);
 79
 80	/*
 81	 * Our buffers are kept pinned, so allocating them
 82	 * from the MOVABLE zone is a really bad idea, and
 83	 * conflicts with CMA. See comments above new_inode()
 84	 * why this is required _and_ expected if you're
 85	 * going to pin these pages.
 86	 */
 87	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
 88			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 89
 90	return shmem;
 91
 92err_release:
 93	drm_gem_object_release(obj);
 94err_free:
 95	kfree(obj);
 96
 97	return ERR_PTR(ret);
 98}
 99EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
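A usage sketch (the surrounding driver code and the size value are hypothetical); note that the embedded GEM object is shmem->base:

	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);
	/* ... hand &shmem->base to the generic GEM APIs ... */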
100
101/**
102 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
103 * @obj: GEM object to free
104 *
105 * This function cleans up the GEM object state and frees the memory used to
106 * store the object itself.
107 */
108void drm_gem_shmem_free_object(struct drm_gem_object *obj)
109{
110	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
111
112	WARN_ON(shmem->vmap_use_count);
113
114	if (obj->import_attach) {
115		shmem->pages_use_count--;
116		drm_prime_gem_destroy(obj, shmem->sgt);
117		kvfree(shmem->pages);
118	} else {
119		if (shmem->sgt) {
120			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
121				     shmem->sgt->nents, DMA_BIDIRECTIONAL);
122			sg_free_table(shmem->sgt);
123			kfree(shmem->sgt);
124		}
125		if (shmem->pages)
126			drm_gem_shmem_put_pages(shmem);
127	}
128
129	WARN_ON(shmem->pages_use_count);
130
131	drm_gem_object_release(obj);
132	mutex_destroy(&shmem->pages_lock);
133	mutex_destroy(&shmem->vmap_lock);
134	kfree(shmem);
135}
136EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
137
138static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
139{
140	struct drm_gem_object *obj = &shmem->base;
141	struct page **pages;
142
143	if (shmem->pages_use_count++ > 0)
144		return 0;
145
146	pages = drm_gem_get_pages(obj);
147	if (IS_ERR(pages)) {
148		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
149		shmem->pages_use_count = 0;
150		return PTR_ERR(pages);
151	}
152
153	shmem->pages = pages;
154
155	return 0;
156}
157
158/*
159 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
160 * @shmem: shmem GEM object
161 *
162 * This function makes sure that backing pages exist for the shmem GEM object
163 * and increases the use count.
164 *
165 * Returns:
166 * 0 on success or a negative error code on failure.
167 */
168int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
169{
170	int ret;
171
172	ret = mutex_lock_interruptible(&shmem->pages_lock);
173	if (ret)
174		return ret;
175	ret = drm_gem_shmem_get_pages_locked(shmem);
176	mutex_unlock(&shmem->pages_lock);
177
178	return ret;
179}
180EXPORT_SYMBOL(drm_gem_shmem_get_pages);
181
182static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
183{
184	struct drm_gem_object *obj = &shmem->base;
185
186	if (WARN_ON_ONCE(!shmem->pages_use_count))
187		return;
188
189	if (--shmem->pages_use_count > 0)
190		return;
191
192	drm_gem_put_pages(obj, shmem->pages,
193			  shmem->pages_mark_dirty_on_put,
194			  shmem->pages_mark_accessed_on_put);
195	shmem->pages = NULL;
196}
197
198/*
199 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
200 * @shmem: shmem GEM object
201 *
202 * This function decreases the use count and puts the backing pages when use drops to zero.
203 */
204void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
205{
206	mutex_lock(&shmem->pages_lock);
207	drm_gem_shmem_put_pages_locked(shmem);
208	mutex_unlock(&shmem->pages_lock);
209}
210EXPORT_SYMBOL(drm_gem_shmem_put_pages);
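get_pages()/put_pages() form a refcounted pair; a sketch of balanced use, assuming the caller only needs the pages for the duration of one operation:

	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;
	/* ... shmem->pages[] is valid and pinned here ... */
	drm_gem_shmem_put_pages(shmem);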
211
212/**
213 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
214 * @obj: GEM object
215 *
216 * This function makes sure the backing pages are pinned in memory while the
217 * buffer is exported.
218 *
219 * Returns:
220 * 0 on success or a negative error code on failure.
221 */
222int drm_gem_shmem_pin(struct drm_gem_object *obj)
223{
224	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
225
226	return drm_gem_shmem_get_pages(shmem);
227}
228EXPORT_SYMBOL(drm_gem_shmem_pin);
229
230/**
231 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
232 * @obj: GEM object
233 *
234 * This function removes the requirement that the backing pages are pinned in
235 * memory.
236 */
237void drm_gem_shmem_unpin(struct drm_gem_object *obj)
238{
239	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
240
241	drm_gem_shmem_put_pages(shmem);
242}
243EXPORT_SYMBOL(drm_gem_shmem_unpin);
244
245static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
246{
247	struct drm_gem_object *obj = &shmem->base;
248	int ret;
249
250	if (shmem->vmap_use_count++ > 0)
251		return shmem->vaddr;
252
253	ret = drm_gem_shmem_get_pages(shmem);
254	if (ret)
255		goto err_zero_use;
256
257	if (obj->import_attach)
258		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
259	else
260		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
261				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
262
263	if (!shmem->vaddr) {
264		DRM_DEBUG_KMS("Failed to vmap pages\n");
265		ret = -ENOMEM;
266		goto err_put_pages;
267	}
268
269	return shmem->vaddr;
270
271err_put_pages:
272	drm_gem_shmem_put_pages(shmem);
273err_zero_use:
274	shmem->vmap_use_count = 0;
275
276	return ERR_PTR(ret);
277}
278
279/*
280 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
281 * @shmem: shmem GEM object
282 *
283 * This function makes sure that a virtual address exists for the buffer backing
284 * the shmem GEM object.
285 *
286 * Returns:
287 * 0 on success or a negative error code on failure.
288 */
289void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
290{
291	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
292	void *vaddr;
293	int ret;
294
295	ret = mutex_lock_interruptible(&shmem->vmap_lock);
296	if (ret)
297		return ERR_PTR(ret);
298	vaddr = drm_gem_shmem_vmap_locked(shmem);
299	mutex_unlock(&shmem->vmap_lock);
300
301	return vaddr;
302}
303EXPORT_SYMBOL(drm_gem_shmem_vmap);
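A sketch of the v5.4 vmap/vunmap pairing (data and len are hypothetical):

	void *vaddr = drm_gem_shmem_vmap(&shmem->base);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	memcpy(vaddr, data, len);
	drm_gem_shmem_vunmap(&shmem->base, vaddr);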
304
305static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
306{
307	struct drm_gem_object *obj = &shmem->base;
308
309	if (WARN_ON_ONCE(!shmem->vmap_use_count))
310		return;
311
312	if (--shmem->vmap_use_count > 0)
313		return;
314
315	if (obj->import_attach)
316		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
317	else
318		vunmap(shmem->vaddr);
319
320	shmem->vaddr = NULL;
321	drm_gem_shmem_put_pages(shmem);
322}
323
324/*
325 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
326 * @shmem: shmem GEM object
327 *
328 * This function removes the virtual address when use count drops to zero.
329 */
330void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
331{
332	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
333
334	mutex_lock(&shmem->vmap_lock);
335	drm_gem_shmem_vunmap_locked(shmem);
336	mutex_unlock(&shmem->vmap_lock);
337}
338EXPORT_SYMBOL(drm_gem_shmem_vunmap);
339
340struct drm_gem_shmem_object *
341drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
342				 struct drm_device *dev, size_t size,
343				 uint32_t *handle)
344{
345	struct drm_gem_shmem_object *shmem;
346	int ret;
347
348	shmem = drm_gem_shmem_create(dev, size);
349	if (IS_ERR(shmem))
350		return shmem;
351
352	/*
353	 * Allocate an ID in the IDR table where the object is registered;
354	 * the handle holds the ID that userspace can see.
355	 */
356	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
357	/* drop reference from allocate - handle holds it now. */
358	drm_gem_object_put_unlocked(&shmem->base);
359	if (ret)
360		return ERR_PTR(ret);
361
362	return shmem;
363}
364EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
365
366/* Update madvise status. Returns true if the object has not been
367 * purged, false otherwise.
368 */
369int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
370{
371	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
372
373	mutex_lock(&shmem->pages_lock);
374
375	if (shmem->madv >= 0)
376		shmem->madv = madv;
377
378	madv = shmem->madv;
379
380	mutex_unlock(&shmem->pages_lock);
381
382	return (madv >= 0);
383}
384EXPORT_SYMBOL(drm_gem_shmem_madvise);
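A sketch of a caller, loosely modeled on drivers with a madvise-style ioctl (args is a hypothetical ioctl argument struct):

	args->retained = drm_gem_shmem_madvise(obj, args->madv);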
385
386void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
387{
388	struct drm_device *dev = obj->dev;
389	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
390
391	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
392
393	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
394		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
395	sg_free_table(shmem->sgt);
396	kfree(shmem->sgt);
397	shmem->sgt = NULL;
398
399	drm_gem_shmem_put_pages_locked(shmem);
400
401	shmem->madv = -1;
402
403	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
404	drm_gem_free_mmap_offset(obj);
405
406	/* Our goal here is to return as much of the memory as
407	 * is possible back to the system as we are called from OOM.
408	 * To do this we must instruct the shmfs to drop all of its
409	 * backing pages, *now*.
410	 */
411	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
412
413	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
414			0, (loff_t)-1);
415}
416EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
417
418bool drm_gem_shmem_purge(struct drm_gem_object *obj)
419{
420	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
421
422	if (!mutex_trylock(&shmem->pages_lock))
423		return false;
424	drm_gem_shmem_purge_locked(obj);
425	mutex_unlock(&shmem->pages_lock);
426
427	return true;
428}
429EXPORT_SYMBOL(drm_gem_shmem_purge);
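A sketch of a shrinker-style caller, assuming the buffer was previously marked purgeable via drm_gem_shmem_madvise(); drm_gem_shmem_is_purgeable() comes from drm_gem_shmem_helper.h:

	if (drm_gem_shmem_is_purgeable(shmem))
		drm_gem_shmem_purge(&shmem->base);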
430
431/**
432 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
433 * @file: DRM file structure to create the dumb buffer for
434 * @dev: DRM device
435 * @args: IOCTL data
436 *
437 * This function computes the pitch of the dumb buffer and rounds it up to an
438 * integer number of bytes per pixel. Drivers for hardware that doesn't have
439 * any additional restrictions on the pitch can directly use this function as
440 * their &drm_driver.dumb_create callback.
441 *
442 * For hardware with additional restrictions, drivers can adjust the fields
443 * set up by userspace before calling into this function.
444 *
445 * Returns:
446 * 0 on success or a negative error code on failure.
447 */
448int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
449			      struct drm_mode_create_dumb *args)
450{
451	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
452	struct drm_gem_shmem_object *shmem;
453
454	if (!args->pitch || !args->size) {
455		args->pitch = min_pitch;
456		args->size = args->pitch * args->height;
457	} else {
458		/* ensure sane minimum values */
459		if (args->pitch < min_pitch)
460			args->pitch = min_pitch;
461		if (args->size < args->pitch * args->height)
462			args->size = args->pitch * args->height;
463	}
464
465	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
466
467	return PTR_ERR_OR_ZERO(shmem);
468}
469EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
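As a worked example: a 640x480 dumb buffer with bpp=32 gets min_pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes and, with pitch and size left at zero by userspace, size = 2560 * 480 = 1228800 bytes.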
470
471static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
472{
473	struct vm_area_struct *vma = vmf->vma;
474	struct drm_gem_object *obj = vma->vm_private_data;
475	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
476	loff_t num_pages = obj->size >> PAGE_SHIFT;
477	struct page *page;
478
479	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
480		return VM_FAULT_SIGBUS;
481
482	page = shmem->pages[vmf->pgoff];
483
484	return vmf_insert_page(vma, vmf->address, page);
485}
486
487static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
488{
489	struct drm_gem_object *obj = vma->vm_private_data;
490	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
491	int ret;
492
493	ret = drm_gem_shmem_get_pages(shmem);
494	WARN_ON_ONCE(ret != 0);
495
496	drm_gem_vm_open(vma);
497}
498
499static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
500{
501	struct drm_gem_object *obj = vma->vm_private_data;
502	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
503
504	drm_gem_shmem_put_pages(shmem);
505	drm_gem_vm_close(vma);
506}
507
508const struct vm_operations_struct drm_gem_shmem_vm_ops = {
509	.fault = drm_gem_shmem_fault,
510	.open = drm_gem_shmem_vm_open,
511	.close = drm_gem_shmem_vm_close,
512};
513EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
514
515/**
516 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
517 * @filp: File object
518 * @vma: VMA for the area to be mapped
519 *
520 * This function implements an augmented version of the GEM DRM file mmap
521 * operation for shmem objects. Drivers which employ the shmem helpers should
522 * use this function as their &file_operations.mmap handler in the DRM device file's
523 * file_operations structure.
524 *
525 * Instead of directly referencing this function, drivers should use the
526 * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
527 *
528 * Returns:
529 * 0 on success or a negative error code on failure.
530 */
531int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
532{
533	struct drm_gem_shmem_object *shmem;
534	int ret;
535
536	ret = drm_gem_mmap(filp, vma);
537	if (ret)
538		return ret;
539
540	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
541
542	ret = drm_gem_shmem_get_pages(shmem);
543	if (ret) {
544		drm_gem_vm_close(vma);
545		return ret;
546	}
547
548	/* VM_PFNMAP was set by drm_gem_mmap() */
549	vma->vm_flags &= ~VM_PFNMAP;
550	vma->vm_flags |= VM_MIXEDMAP;
551
552	/* Remove the fake offset */
553	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
554
555	return 0;
556}
557EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
558
559/**
560 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
561 * @p: DRM printer
562 * @indent: Tab indentation level
563 * @obj: GEM object
564 */
565void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
566			      const struct drm_gem_object *obj)
567{
568	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
569
570	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
571	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
572	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
573}
574EXPORT_SYMBOL(drm_gem_shmem_print_info);
575
576/**
577 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
578 *                              pages for a shmem GEM object
579 * @obj: GEM object
580 *
581 * This function exports a scatter/gather table suitable for PRIME usage by
582 * calling the standard DMA mapping API.
583 *
584 * Returns:
585 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
586 */
587struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
588{
589	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
590
591	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
592}
593EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
594
595/**
596 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
597 *				 scatter/gather table for a shmem GEM object.
598 * @obj: GEM object
599 *
600 * This function returns a scatter/gather table suitable for driver usage. If
601 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
602 * table created.
603 *
604 * Returns:
605 * A pointer to the scatter/gather table of pinned pages or errno on failure.
606 */
607struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
608{
609	int ret;
610	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
611	struct sg_table *sgt;
612
613	if (shmem->sgt)
614		return shmem->sgt;
615
616	WARN_ON(obj->import_attach);
617
618	ret = drm_gem_shmem_get_pages(shmem);
619	if (ret)
620		return ERR_PTR(ret);
621
622	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
623	if (IS_ERR(sgt)) {
624		ret = PTR_ERR(sgt);
625		goto err_put_pages;
626	}
627	/* Map the pages for use by the h/w. */
628	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
629
630	shmem->sgt = sgt;
631
632	return sgt;
633
634err_put_pages:
635	drm_gem_shmem_put_pages(shmem);
636	return ERR_PTR(ret);
637}
638EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
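A consumer sketch; on success the table is already dma-mapped, so a driver would walk it with the standard scatterlist accessors:

	struct sg_table *sgt;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);
	/* program the device using sg_dma_address()/sg_dma_len() */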
639
640/**
641 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
642 *                 another driver's scatter/gather table of pinned pages
643 * @dev: Device to import into
644 * @attach: DMA-BUF attachment
645 * @sgt: Scatter/gather table of pinned pages
646 *
647 * This function imports a scatter/gather table exported via DMA-BUF by
648 * another driver. Drivers that use the shmem helpers should set this as their
649 * &drm_driver.gem_prime_import_sg_table callback.
650 *
651 * Returns:
652 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
653 * error code on failure.
654 */
655struct drm_gem_object *
656drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
657				    struct dma_buf_attachment *attach,
658				    struct sg_table *sgt)
659{
660	size_t size = PAGE_ALIGN(attach->dmabuf->size);
661	size_t npages = size >> PAGE_SHIFT;
662	struct drm_gem_shmem_object *shmem;
663	int ret;
664
665	shmem = drm_gem_shmem_create(dev, size);
666	if (IS_ERR(shmem))
667		return ERR_CAST(shmem);
668
669	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
670	if (!shmem->pages) {
671		ret = -ENOMEM;
672		goto err_free_gem;
673	}
674
675	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
676	if (ret < 0)
677		goto err_free_array;
678
679	shmem->sgt = sgt;
680	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
681
682	DRM_DEBUG_PRIME("size = %zu\n", size);
683
684	return &shmem->base;
685
686err_free_array:
687	kvfree(shmem->pages);
688err_free_gem:
689	drm_gem_object_put_unlocked(&shmem->base);
690
691	return ERR_PTR(ret);
692}
693EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright 2018 Noralf Trønnes
  4 */
  5
  6#include <linux/dma-buf.h>
  7#include <linux/export.h>
  8#include <linux/module.h>
  9#include <linux/mutex.h>
 10#include <linux/shmem_fs.h>
 11#include <linux/slab.h>
 12#include <linux/vmalloc.h>
 13#include <linux/module.h>
 14
 15#ifdef CONFIG_X86
 16#include <asm/set_memory.h>
 17#endif
 18
 19#include <drm/drm.h>
 20#include <drm/drm_device.h>
 21#include <drm/drm_drv.h>
 22#include <drm/drm_gem_shmem_helper.h>
 23#include <drm/drm_prime.h>
 24#include <drm/drm_print.h>
 25
 26MODULE_IMPORT_NS(DMA_BUF);
 27
 28/**
 29 * DOC: overview
 30 *
 31 * This library provides helpers for GEM objects backed by shmem buffers
 32 * allocated using anonymous pageable memory.
 33 *
 34 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 35 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 36 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 37 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 38 */
 39
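A minimal sketch of the corresponding v6.2-era wiring, assuming the DRM_GEM_SHMEM_DRIVER_OPS and DEFINE_DRM_GEM_FOPS() macros from the headers (my_fops and my_driver are hypothetical):

	DEFINE_DRM_GEM_FOPS(my_fops);

	static const struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.fops = &my_fops,
		DRM_GEM_SHMEM_DRIVER_OPS,
	};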
 40static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
 41	.free = drm_gem_shmem_object_free,
 42	.print_info = drm_gem_shmem_object_print_info,
 43	.pin = drm_gem_shmem_object_pin,
 44	.unpin = drm_gem_shmem_object_unpin,
 45	.get_sg_table = drm_gem_shmem_object_get_sg_table,
 46	.vmap = drm_gem_shmem_object_vmap,
 47	.vunmap = drm_gem_shmem_object_vunmap,
 48	.mmap = drm_gem_shmem_object_mmap,
 49	.vm_ops = &drm_gem_shmem_vm_ops,
 50};
 51
 52static struct drm_gem_shmem_object *
 53__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 54{
 55	struct drm_gem_shmem_object *shmem;
 56	struct drm_gem_object *obj;
 57	int ret = 0;
 58
 59	size = PAGE_ALIGN(size);
 60
 61	if (dev->driver->gem_create_object) {
 62		obj = dev->driver->gem_create_object(dev, size);
 63		if (IS_ERR(obj))
 64			return ERR_CAST(obj);
 65		shmem = to_drm_gem_shmem_obj(obj);
 66	} else {
 67		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
 68		if (!shmem)
 69			return ERR_PTR(-ENOMEM);
 70		obj = &shmem->base;
 71	}
 72
 73	if (!obj->funcs)
 74		obj->funcs = &drm_gem_shmem_funcs;
 75
 76	if (private) {
 77		drm_gem_private_object_init(dev, obj, size);
 78		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
 79	} else {
 80		ret = drm_gem_object_init(dev, obj, size);
 81	}
 82	if (ret)
 83		goto err_free;
 84
 85	ret = drm_gem_create_mmap_offset(obj);
 86	if (ret)
 87		goto err_release;
 88
 89	mutex_init(&shmem->pages_lock);
 90	mutex_init(&shmem->vmap_lock);
 91	INIT_LIST_HEAD(&shmem->madv_list);
 92
 93	if (!private) {
 94		/*
 95		 * Our buffers are kept pinned, so allocating them
 96		 * from the MOVABLE zone is a really bad idea, and
 97		 * conflicts with CMA. See comments above new_inode()
 98		 * why this is required _and_ expected if you're
 99		 * going to pin these pages.
100		 */
101		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
102				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
103	}
104
105	return shmem;
106
107err_release:
108	drm_gem_object_release(obj);
109err_free:
110	kfree(obj);
111
112	return ERR_PTR(ret);
113}
114/**
115 * drm_gem_shmem_create - Allocate an object with the given size
116 * @dev: DRM device
117 * @size: Size of the object to allocate
118 *
119 * This function creates a shmem GEM object.
120 *
121 * Returns:
122 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
123 * error code on failure.
124 */
125struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
126{
127	return __drm_gem_shmem_create(dev, size, false);
128}
129EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
130
131/**
132 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
133 * @shmem: shmem GEM object to free
134 *
135 * This function cleans up the GEM object state and frees the memory used to
136 * store the object itself.
137 */
138void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
139{
140	struct drm_gem_object *obj = &shmem->base;
141
142	WARN_ON(shmem->vmap_use_count);
143
144	if (obj->import_attach) {
145		drm_prime_gem_destroy(obj, shmem->sgt);
146	} else {
147		if (shmem->sgt) {
148			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
149					  DMA_BIDIRECTIONAL, 0);
150			sg_free_table(shmem->sgt);
151			kfree(shmem->sgt);
152		}
153		if (shmem->pages)
154			drm_gem_shmem_put_pages(shmem);
155	}
156
157	WARN_ON(shmem->pages_use_count);
158
159	drm_gem_object_release(obj);
160	mutex_destroy(&shmem->pages_lock);
161	mutex_destroy(&shmem->vmap_lock);
162	kfree(shmem);
163}
164EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
165
166static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
167{
168	struct drm_gem_object *obj = &shmem->base;
169	struct page **pages;
170
171	if (shmem->pages_use_count++ > 0)
172		return 0;
173
174	pages = drm_gem_get_pages(obj);
175	if (IS_ERR(pages)) {
176		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
177		shmem->pages_use_count = 0;
178		return PTR_ERR(pages);
179	}
180
181	/*
182	 * TODO: Allocating WC pages which are correctly flushed is only
183	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
184	 * ttm_pool.c could use.
185	 */
186#ifdef CONFIG_X86
187	if (shmem->map_wc)
188		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
189#endif
190
191	shmem->pages = pages;
192
193	return 0;
194}
195
196/*
197 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
198 * @shmem: shmem GEM object
199 *
200 * This function makes sure that backing pages exist for the shmem GEM object
201 * and increases the use count.
202 *
203 * Returns:
204 * 0 on success or a negative error code on failure.
205 */
206int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
207{
208	int ret;
209
210	WARN_ON(shmem->base.import_attach);
211
212	ret = mutex_lock_interruptible(&shmem->pages_lock);
213	if (ret)
214		return ret;
215	ret = drm_gem_shmem_get_pages_locked(shmem);
216	mutex_unlock(&shmem->pages_lock);
217
218	return ret;
219}
220EXPORT_SYMBOL(drm_gem_shmem_get_pages);
221
222static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
223{
224	struct drm_gem_object *obj = &shmem->base;
225
226	if (WARN_ON_ONCE(!shmem->pages_use_count))
227		return;
228
229	if (--shmem->pages_use_count > 0)
230		return;
231
232#ifdef CONFIG_X86
233	if (shmem->map_wc)
234		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
235#endif
236
237	drm_gem_put_pages(obj, shmem->pages,
238			  shmem->pages_mark_dirty_on_put,
239			  shmem->pages_mark_accessed_on_put);
240	shmem->pages = NULL;
241}
242
243/*
244 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
245 * @shmem: shmem GEM object
246 *
247 * This function decreases the use count and puts the backing pages when use drops to zero.
248 */
249void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
250{
251	mutex_lock(&shmem->pages_lock);
252	drm_gem_shmem_put_pages_locked(shmem);
253	mutex_unlock(&shmem->pages_lock);
254}
255EXPORT_SYMBOL(drm_gem_shmem_put_pages);
256
257/**
258 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
259 * @shmem: shmem GEM object
260 *
261 * This function makes sure the backing pages are pinned in memory while the
262 * buffer is exported.
263 *
264 * Returns:
265 * 0 on success or a negative error code on failure.
266 */
267int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
268{
269	WARN_ON(shmem->base.import_attach);
270
271	return drm_gem_shmem_get_pages(shmem);
272}
273EXPORT_SYMBOL(drm_gem_shmem_pin);
274
275/**
276 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
277 * @shmem: shmem GEM object
278 *
279 * This function removes the requirement that the backing pages are pinned in
280 * memory.
281 */
282void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
283{
284	WARN_ON(shmem->base.import_attach);
285
286	drm_gem_shmem_put_pages(shmem);
287}
288EXPORT_SYMBOL(drm_gem_shmem_unpin);
289
290static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
291				     struct iosys_map *map)
292{
293	struct drm_gem_object *obj = &shmem->base;
294	int ret = 0;
295
296	if (shmem->vmap_use_count++ > 0) {
297		iosys_map_set_vaddr(map, shmem->vaddr);
298		return 0;
299	}
300
301	if (obj->import_attach) {
302		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
303		if (!ret) {
304			if (WARN_ON(map->is_iomem)) {
305				dma_buf_vunmap(obj->import_attach->dmabuf, map);
306				ret = -EIO;
307				goto err_put_pages;
308			}
309			shmem->vaddr = map->vaddr;
310		}
311	} else {
312		pgprot_t prot = PAGE_KERNEL;
313
314		ret = drm_gem_shmem_get_pages(shmem);
315		if (ret)
316			goto err_zero_use;
317
318		if (shmem->map_wc)
319			prot = pgprot_writecombine(prot);
320		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
321				    VM_MAP, prot);
322		if (!shmem->vaddr)
323			ret = -ENOMEM;
324		else
325			iosys_map_set_vaddr(map, shmem->vaddr);
326	}
327
328	if (ret) {
329		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
330		goto err_put_pages;
331	}
332
333	return 0;
334
335err_put_pages:
336	if (!obj->import_attach)
337		drm_gem_shmem_put_pages(shmem);
338err_zero_use:
339	shmem->vmap_use_count = 0;
340
341	return ret;
342}
343
344/*
345 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
346 * @shmem: shmem GEM object
347 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
348 *       store.
349 *
350 * This function makes sure that a contiguous kernel virtual address mapping
351 * exists for the buffer backing the shmem GEM object. It hides the differences
352 * between dma-buf imported and natively allocated objects.
353 *
354 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
355 *
356 * Returns:
357 * 0 on success or a negative error code on failure.
358 */
359int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
360		       struct iosys_map *map)
361{
362	int ret;
363
364	ret = mutex_lock_interruptible(&shmem->vmap_lock);
365	if (ret)
366		return ret;
367	ret = drm_gem_shmem_vmap_locked(shmem, map);
368	mutex_unlock(&shmem->vmap_lock);
369
370	return ret;
371}
372EXPORT_SYMBOL(drm_gem_shmem_vmap);
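The v6.2 interface returns the mapping through struct iosys_map instead of a raw pointer; a sketch (data and len are hypothetical):

	struct iosys_map map;
	int ret;

	ret = drm_gem_shmem_vmap(shmem, &map);
	if (ret)
		return ret;
	iosys_map_memcpy_to(&map, 0, data, len);
	drm_gem_shmem_vunmap(shmem, &map);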
373
374static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
375					struct iosys_map *map)
376{
377	struct drm_gem_object *obj = &shmem->base;
378
379	if (WARN_ON_ONCE(!shmem->vmap_use_count))
380		return;
381
382	if (--shmem->vmap_use_count > 0)
383		return;
384
385	if (obj->import_attach) {
386		dma_buf_vunmap(obj->import_attach->dmabuf, map);
387	} else {
388		vunmap(shmem->vaddr);
389		drm_gem_shmem_put_pages(shmem);
390	}
391
392	shmem->vaddr = NULL;
393}
394
395/*
396 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
397 * @shmem: shmem GEM object
398 * @map: Kernel virtual address where the SHMEM GEM object was mapped
399 *
400 * This function cleans up a kernel virtual address mapping acquired by
401 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
402 * zero.
403 *
404 * This function hides the differences between dma-buf imported and natively
405 * allocated objects.
406 */
407void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
408			  struct iosys_map *map)
409{
410	mutex_lock(&shmem->vmap_lock);
411	drm_gem_shmem_vunmap_locked(shmem, map);
412	mutex_unlock(&shmem->vmap_lock);
413}
414EXPORT_SYMBOL(drm_gem_shmem_vunmap);
415
416static struct drm_gem_shmem_object *
417drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
418				 struct drm_device *dev, size_t size,
419				 uint32_t *handle)
420{
421	struct drm_gem_shmem_object *shmem;
422	int ret;
423
424	shmem = drm_gem_shmem_create(dev, size);
425	if (IS_ERR(shmem))
426		return shmem;
427
428	/*
429	 * Allocate an ID in the IDR table where the object is registered;
430	 * the handle holds the ID that userspace can see.
431	 */
432	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
433	/* drop reference from allocate - handle holds it now. */
434	drm_gem_object_put(&shmem->base);
435	if (ret)
436		return ERR_PTR(ret);
437
438	return shmem;
439}
440
441/* Update madvise status. Returns true if the object has not been
442 * purged, false otherwise.
443 */
444int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
445{
446	mutex_lock(&shmem->pages_lock);
447
448	if (shmem->madv >= 0)
449		shmem->madv = madv;
450
451	madv = shmem->madv;
452
453	mutex_unlock(&shmem->pages_lock);
454
455	return (madv >= 0);
456}
457EXPORT_SYMBOL(drm_gem_shmem_madvise);
458
459void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
460{
461	struct drm_gem_object *obj = &shmem->base;
462	struct drm_device *dev = obj->dev;
463
464	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
465
466	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
467	sg_free_table(shmem->sgt);
468	kfree(shmem->sgt);
469	shmem->sgt = NULL;
470
471	drm_gem_shmem_put_pages_locked(shmem);
472
473	shmem->madv = -1;
474
475	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
476	drm_gem_free_mmap_offset(obj);
477
478	/* Our goal here is to return as much of the memory as
479	 * is possible back to the system as we are called from OOM.
480	 * To do this we must instruct the shmfs to drop all of its
481	 * backing pages, *now*.
482	 */
483	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
484
485	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
486}
487EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
488
489bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
490{
491	if (!mutex_trylock(&shmem->pages_lock))
492		return false;
493	drm_gem_shmem_purge_locked(shmem);
494	mutex_unlock(&shmem->pages_lock);
495
496	return true;
497}
498EXPORT_SYMBOL(drm_gem_shmem_purge);
499
500/**
501 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
502 * @file: DRM file structure to create the dumb buffer for
503 * @dev: DRM device
504 * @args: IOCTL data
505 *
506 * This function computes the pitch of the dumb buffer and rounds it up to an
507 * integer number of bytes per pixel. Drivers for hardware that doesn't have
508 * any additional restrictions on the pitch can directly use this function as
509 * their &drm_driver.dumb_create callback.
510 *
511 * For hardware with additional restrictions, drivers can adjust the fields
512 * set up by userspace before calling into this function.
513 *
514 * Returns:
515 * 0 on success or a negative error code on failure.
516 */
517int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
518			      struct drm_mode_create_dumb *args)
519{
520	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
521	struct drm_gem_shmem_object *shmem;
522
523	if (!args->pitch || !args->size) {
524		args->pitch = min_pitch;
525		args->size = PAGE_ALIGN(args->pitch * args->height);
526	} else {
527		/* ensure sane minimum values */
528		if (args->pitch < min_pitch)
529			args->pitch = min_pitch;
530		if (args->size < args->pitch * args->height)
531			args->size = PAGE_ALIGN(args->pitch * args->height);
532	}
533
534	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
535
536	return PTR_ERR_OR_ZERO(shmem);
537}
538EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
539
540static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
541{
542	struct vm_area_struct *vma = vmf->vma;
543	struct drm_gem_object *obj = vma->vm_private_data;
544	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
545	loff_t num_pages = obj->size >> PAGE_SHIFT;
546	vm_fault_t ret;
547	struct page *page;
548	pgoff_t page_offset;
549
550	/* We don't use vmf->pgoff since that has the fake offset */
551	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
552
553	mutex_lock(&shmem->pages_lock);
554
555	if (page_offset >= num_pages ||
556	    WARN_ON_ONCE(!shmem->pages) ||
557	    shmem->madv < 0) {
558		ret = VM_FAULT_SIGBUS;
559	} else {
560		page = shmem->pages[page_offset];
561
562		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
563	}
564
565	mutex_unlock(&shmem->pages_lock);
566
567	return ret;
568}
569
570static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
571{
572	struct drm_gem_object *obj = vma->vm_private_data;
573	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
574
575	WARN_ON(shmem->base.import_attach);
576
577	mutex_lock(&shmem->pages_lock);
578
579	/*
580	 * We should have already pinned the pages when the buffer was first
581	 * mmap'd, vm_open() just grabs an additional reference for the new
582	 * mm the vma is getting copied into (ie. on fork()).
583	 */
584	if (!WARN_ON_ONCE(!shmem->pages_use_count))
585		shmem->pages_use_count++;
586
587	mutex_unlock(&shmem->pages_lock);
588
589	drm_gem_vm_open(vma);
590}
591
592static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
593{
594	struct drm_gem_object *obj = vma->vm_private_data;
595	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
596
597	drm_gem_shmem_put_pages(shmem);
598	drm_gem_vm_close(vma);
599}
600
601const struct vm_operations_struct drm_gem_shmem_vm_ops = {
602	.fault = drm_gem_shmem_fault,
603	.open = drm_gem_shmem_vm_open,
604	.close = drm_gem_shmem_vm_close,
605};
606EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
607
608/**
609 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
610 * @shmem: shmem GEM object
611 * @vma: VMA for the area to be mapped
612 *
613 * This function implements an augmented version of the GEM DRM file mmap
614 * operation for shmem objects.
615 *
616 * Returns:
617 * 0 on success or a negative error code on failure.
618 */
619int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
620{
621	struct drm_gem_object *obj = &shmem->base;
622	int ret;
623
624	if (obj->import_attach) {
625		/* Drop the reference drm_gem_mmap_obj() acquired.*/
626		drm_gem_object_put(obj);
627		vma->vm_private_data = NULL;
628
629		return dma_buf_mmap(obj->dma_buf, vma, 0);
630	}
631
632	ret = drm_gem_shmem_get_pages(shmem);
633	if (ret)
634		return ret;
635
636	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
637	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
638	if (shmem->map_wc)
639		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
640
641	return 0;
642}
643EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
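Note that unlike the v5.4 version, which was a &file_operations.mmap handler, this version is reached through &drm_gem_object_funcs.mmap (the drm_gem_shmem_object_mmap() wrapper in drm_gem_shmem_funcs above), so drivers that use the default object funcs get it without any explicit wiring.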
644
645/**
646 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
647 * @shmem: shmem GEM object
648 * @p: DRM printer
649 * @indent: Tab indentation level
650 */
651void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
652			      struct drm_printer *p, unsigned int indent)
653{
654	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
655	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
656	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
657}
658EXPORT_SYMBOL(drm_gem_shmem_print_info);
659
660/**
661 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
662 *                              pages for a shmem GEM object
663 * @shmem: shmem GEM object
664 *
665 * This function exports a scatter/gather table suitable for PRIME usage by
666 * calling the standard DMA mapping API.
667 *
668 * Drivers that need to acquire a scatter/gather table for objects should call
669 * drm_gem_shmem_get_pages_sgt() instead.
670 *
671 * Returns:
672 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
673 */
674struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
675{
676	struct drm_gem_object *obj = &shmem->base;
677
678	WARN_ON(shmem->base.import_attach);
679
680	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
681}
682EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
683
684/**
685 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
686 *				 scatter/gather table for a shmem GEM object.
687 * @shmem: shmem GEM object
688 *
689 * This function returns a scatter/gather table suitable for driver usage. If
690 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
691 * table created.
692 *
693 * This is the main function for drivers to get at backing storage, and it hides
694 * the difference between dma-buf imported and natively allocated objects.
695 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
696 *
697 * Returns:
698 * A pointer to the scatter/gather table of pinned pages or errno on failure.
699 */
700struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
701{
702	struct drm_gem_object *obj = &shmem->base;
703	int ret;
704	struct sg_table *sgt;
705
706	if (shmem->sgt)
707		return shmem->sgt;
708
709	WARN_ON(obj->import_attach);
710
711	ret = drm_gem_shmem_get_pages(shmem);
712	if (ret)
713		return ERR_PTR(ret);
714
715	sgt = drm_gem_shmem_get_sg_table(shmem);
716	if (IS_ERR(sgt)) {
717		ret = PTR_ERR(sgt);
718		goto err_put_pages;
719	}
720	/* Map the pages for use by the h/w. */
721	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
722	if (ret)
723		goto err_free_sgt;
724
725	shmem->sgt = sgt;
726
727	return sgt;
728
729err_free_sgt:
730	sg_free_table(sgt);
731	kfree(sgt);
732err_put_pages:
733	drm_gem_shmem_put_pages(shmem);
734	return ERR_PTR(ret);
735}
736EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
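A v6.2 consumer sketch; for_each_sgtable_dma_sg() iterates the DMA-mapped entries of the returned table:

	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned int i;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);
	for_each_sgtable_dma_sg(sgt, sgl, i) {
		/* program sg_dma_address(sgl) / sg_dma_len(sgl) */
	}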
737
738/**
739 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
740 *                 another driver's scatter/gather table of pinned pages
741 * @dev: Device to import into
742 * @attach: DMA-BUF attachment
743 * @sgt: Scatter/gather table of pinned pages
744 *
745 * This function imports a scatter/gather table exported via DMA-BUF by
746 * another driver. Drivers that use the shmem helpers should set this as their
747 * &drm_driver.gem_prime_import_sg_table callback.
748 *
749 * Returns:
750 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
751 * error code on failure.
752 */
753struct drm_gem_object *
754drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
755				    struct dma_buf_attachment *attach,
756				    struct sg_table *sgt)
757{
758	size_t size = PAGE_ALIGN(attach->dmabuf->size);
759	struct drm_gem_shmem_object *shmem;
760
761	shmem = __drm_gem_shmem_create(dev, size, true);
762	if (IS_ERR(shmem))
763		return ERR_CAST(shmem);
764
765	shmem->sgt = sgt;
766
767	DRM_DEBUG_PRIME("size = %zu\n", size);
768
769	return &shmem->base;
770}
771EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
772
773MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
774MODULE_IMPORT_NS(DMA_BUF);
775MODULE_LICENSE("GPL v2");