v4.6
  1/*
  2 * Copyright © 2012 Red Hat
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *      Dave Airlie <airlied@redhat.com>
 25 *      Rob Clark <rob.clark@linaro.org>
 26 *
 27 */
 28
 29#include <linux/export.h>
 30#include <linux/dma-buf.h>
 31#include <drm/drmP.h>
 32#include <drm/drm_gem.h>
 
 33
 34#include "drm_internal.h"
 35
 36/*
 37 * DMA-BUF/GEM Object references and lifetime overview:
 38 *
 39 * On the export the dma_buf holds a reference to the exporting GEM
 40 * object. It takes this reference in handle_to_fd_ioctl, when it
 41 * first calls .prime_export and stores the exporting GEM object in
 42 * the dma_buf priv. This reference is released when the dma_buf
 43 * object goes away in the driver .release function.
 44 *
 45 * On the import the importing GEM object holds a reference to the
 46 * dma_buf (which in turn holds a ref to the exporting GEM object).
 47 * It takes that reference in the fd_to_handle ioctl.
 48 * It calls dma_buf_get, creates an attachment to it and stores the
 49 * attachment in the GEM object. When the imported GEM object is
 50 * destroyed, we remove this attachment and drop the reference to
 51 * the dma_buf.
 52 *
 53 * Thus the chain of references always flows in one direction
 54 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 55 *
 56 * Self-importing: if userspace is using PRIME as a replacement for flink
 57 * then it will get a fd->handle request for a GEM object that it created.
 58 * Drivers should detect this situation and return the GEM object
 59 * from the dma-buf private.  PRIME will do this automatically for drivers that
 60 * use the drm_gem_prime_{import,export} helpers.
 61 */
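/*
 * Illustrative sketch (not part of this file): a GEM driver opts into the
 * reference scheme described above by routing the PRIME ioctls through the
 * generic helpers defined below. The "foo_driver" name is hypothetical.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *	};
 */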
 62
 63struct drm_prime_member {
 64	struct list_head entry;
 65	struct dma_buf *dma_buf;
 66	uint32_t handle;
 67};
 68
 69struct drm_prime_attachment {
 70	struct sg_table *sgt;
 71	enum dma_data_direction dir;
 72};
 73
 74static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
 75				    struct dma_buf *dma_buf, uint32_t handle)
 76{
 77	struct drm_prime_member *member;
 
 78
 79	member = kmalloc(sizeof(*member), GFP_KERNEL);
 80	if (!member)
 81		return -ENOMEM;
 82
 83	get_dma_buf(dma_buf);
 84	member->dma_buf = dma_buf;
 85	member->handle = handle;
 86	list_add(&member->entry, &prime_fpriv->head);
 87	return 0;
 88}
 89
 90static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
 91						      uint32_t handle)
 92{
 93	struct drm_prime_member *member;
 94
 95	list_for_each_entry(member, &prime_fpriv->head, entry) {
 96		if (member->handle == handle)
 97			return member->dma_buf;
 98	}
 99
100	return NULL;
101}
102
103static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
104				       struct dma_buf *dma_buf,
105				       uint32_t *handle)
106{
107	struct drm_prime_member *member;
108
109	list_for_each_entry(member, &prime_fpriv->head, entry) {
110		if (member->dma_buf == dma_buf) {
111			*handle = member->handle;
112			return 0;
113		}
114	}
 
115	return -ENOENT;
116}
117
118static int drm_gem_map_attach(struct dma_buf *dma_buf,
119			      struct device *target_dev,
120			      struct dma_buf_attachment *attach)
121{
122	struct drm_prime_attachment *prime_attach;
123	struct drm_gem_object *obj = dma_buf->priv;
124	struct drm_device *dev = obj->dev;
125
126	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
127	if (!prime_attach)
128		return -ENOMEM;
129
130	prime_attach->dir = DMA_NONE;
131	attach->priv = prime_attach;
132
133	if (!dev->driver->gem_prime_pin)
134		return 0;
 
135
136	return dev->driver->gem_prime_pin(obj);
137}
138
139static void drm_gem_map_detach(struct dma_buf *dma_buf,
140			       struct dma_buf_attachment *attach)
141{
142	struct drm_prime_attachment *prime_attach = attach->priv;
143	struct drm_gem_object *obj = dma_buf->priv;
144	struct drm_device *dev = obj->dev;
145	struct sg_table *sgt;
146
147	if (dev->driver->gem_prime_unpin)
148		dev->driver->gem_prime_unpin(obj);
149
150	if (!prime_attach)
151		return;
152
153	sgt = prime_attach->sgt;
154	if (sgt) {
155		if (prime_attach->dir != DMA_NONE)
156			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
157					prime_attach->dir);
158		sg_free_table(sgt);
159	}
160
161	kfree(sgt);
162	kfree(prime_attach);
163	attach->priv = NULL;
164}
165
166void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
167					struct dma_buf *dma_buf)
168{
169	struct drm_prime_member *member, *safe;
170
171	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
172		if (member->dma_buf == dma_buf) {
173			dma_buf_put(dma_buf);
174			list_del(&member->entry);
175			kfree(member);
176		}
177	}
178}
179
180static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
181					    enum dma_data_direction dir)
182{
183	struct drm_prime_attachment *prime_attach = attach->priv;
184	struct drm_gem_object *obj = attach->dmabuf->priv;
185	struct sg_table *sgt;
186
187	if (WARN_ON(dir == DMA_NONE || !prime_attach))
188		return ERR_PTR(-EINVAL);
189
190	/* return the cached mapping when possible */
191	if (prime_attach->dir == dir)
192		return prime_attach->sgt;
193
194	/*
195	 * two mappings with different directions for the same attachment are
196	 * not allowed
197	 */
198	if (WARN_ON(prime_attach->dir != DMA_NONE))
199		return ERR_PTR(-EBUSY);
200
201	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
202
203	if (!IS_ERR(sgt)) {
204		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
205			sg_free_table(sgt);
206			kfree(sgt);
207			sgt = ERR_PTR(-ENOMEM);
208		} else {
209			prime_attach->sgt = sgt;
210			prime_attach->dir = dir;
211		}
212	}
213
214	return sgt;
215}
216
217static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
218				  struct sg_table *sgt,
219				  enum dma_data_direction dir)
220{
221	/* nothing to be done here */
222}
 
223
224/**
225 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
226 * @dma_buf: buffer to be released
227 *
228 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
229 * must use this in their dma_buf ops structure as the release callback.
230 */
231void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
232{
233	struct drm_gem_object *obj = dma_buf->priv;
 
234
235	/* drop the reference on the export fd holds */
236	drm_gem_object_unreference_unlocked(obj);
237}
238EXPORT_SYMBOL(drm_gem_dmabuf_release);
239
240static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
241{
242	struct drm_gem_object *obj = dma_buf->priv;
243	struct drm_device *dev = obj->dev;
 
244
245	return dev->driver->gem_prime_vmap(obj);
246}
 
247
248static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
249{
250	struct drm_gem_object *obj = dma_buf->priv;
251	struct drm_device *dev = obj->dev;
252
253	dev->driver->gem_prime_vunmap(obj, vaddr);
254}
255
256static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
257					unsigned long page_num)
258{
259	return NULL;
260}
261
262static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
263					 unsigned long page_num, void *addr)
264{
265
266}
267static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
268				 unsigned long page_num)
269{
270	return NULL;
271}
272
273static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
274				  unsigned long page_num, void *addr)
275{
276
277}
278
279static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
280			       struct vm_area_struct *vma)
281{
282	struct drm_gem_object *obj = dma_buf->priv;
283	struct drm_device *dev = obj->dev;
284
285	if (!dev->driver->gem_prime_mmap)
286		return -ENOSYS;
287
288	return dev->driver->gem_prime_mmap(obj, vma);
289}
 
290
291static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
292	.attach = drm_gem_map_attach,
293	.detach = drm_gem_map_detach,
294	.map_dma_buf = drm_gem_map_dma_buf,
295	.unmap_dma_buf = drm_gem_unmap_dma_buf,
296	.release = drm_gem_dmabuf_release,
297	.kmap = drm_gem_dmabuf_kmap,
298	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
299	.kunmap = drm_gem_dmabuf_kunmap,
300	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
301	.mmap = drm_gem_dmabuf_mmap,
302	.vmap = drm_gem_dmabuf_vmap,
303	.vunmap = drm_gem_dmabuf_vunmap,
304};
305
306/**
307 * DOC: PRIME Helpers
308 *
309 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
310 * simpler APIs by using the helper functions @drm_gem_prime_export and
311 * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
312 * six lower-level driver callbacks:
313 *
314 * Export callbacks:
315 *
316 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
317 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
318 *  * @gem_prime_vmap: vmap a buffer exported by your driver
319 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
320 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
321 *
322 * Import callback:
323 *
324 *  * @gem_prime_import_sg_table (import): produce a GEM object from another
325 *    driver's scatter/gather table
326 */
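/*
 * Illustrative sketch (not part of this file): in addition to pointing
 * gem_prime_export/gem_prime_import at the helpers, a driver supplies the
 * lower-level callbacks listed above. The "foo_" implementations are
 * hypothetical and assumed to exist in the driver.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features		= DRIVER_GEM | DRIVER_PRIME,
 *		.gem_prime_export		= drm_gem_prime_export,
 *		.gem_prime_import		= drm_gem_prime_import,
 *		.gem_prime_pin			= foo_gem_prime_pin,
 *		.gem_prime_get_sg_table		= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table	= foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap			= foo_gem_prime_vmap,
 *		.gem_prime_vunmap		= foo_gem_prime_vunmap,
 *		.gem_prime_mmap			= foo_gem_prime_mmap,
 *	};
 */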
327
328/**
329 * drm_gem_prime_export - helper library implementation of the export callback
330 * @dev: drm_device to export from
331 * @obj: GEM object to export
332 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
333 *
334 * This is the implementation of the gem_prime_export functions for GEM drivers
335 * using the PRIME helpers.
336 */
337struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
338				     struct drm_gem_object *obj, int flags)
339{
340	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
341
342	exp_info.ops = &drm_gem_prime_dmabuf_ops;
343	exp_info.size = obj->size;
344	exp_info.flags = flags;
345	exp_info.priv = obj;
346
347	if (dev->driver->gem_prime_res_obj)
348		exp_info.resv = dev->driver->gem_prime_res_obj(obj);
349
350	return dma_buf_export(&exp_info);
351}
352EXPORT_SYMBOL(drm_gem_prime_export);
353
354static struct dma_buf *export_and_register_object(struct drm_device *dev,
355						  struct drm_gem_object *obj,
356						  uint32_t flags)
357{
358	struct dma_buf *dmabuf;
359
360	/* prevent races with concurrent gem_close. */
361	if (obj->handle_count == 0) {
362		dmabuf = ERR_PTR(-ENOENT);
363		return dmabuf;
364	}
365
366	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
367	if (IS_ERR(dmabuf)) {
368		/* normally the created dma-buf takes ownership of the ref,
369		 * but if that fails then drop the ref
370		 */
371		return dmabuf;
372	}
373
374	/*
375	 * Note that callers do not need to clean up the export cache
376	 * since the check for obj->handle_count guarantees that someone
377	 * will clean it up.
378	 */
379	obj->dma_buf = dmabuf;
380	get_dma_buf(obj->dma_buf);
381	/* Grab a new ref since the caller's reference is now used by the dma-buf */
382	drm_gem_object_reference(obj);
383
384	return dmabuf;
385}
386
387/**
388 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
389 * @dev: dev to export the buffer from
390 * @file_priv: drm file-private structure
391 * @handle: buffer handle to export
392 * @flags: flags like DRM_CLOEXEC
393 * @prime_fd: pointer to storage for the fd id of the created dma-buf
394 *
395 * This is the PRIME export function which GEM drivers must use to ensure
396 * correct lifetime management of the underlying GEM object.
397 * The actual exporting from GEM object to a dma-buf is done through the
398 * gem_prime_export driver callback.
399 */
400int drm_gem_prime_handle_to_fd(struct drm_device *dev,
401			       struct drm_file *file_priv, uint32_t handle,
402			       uint32_t flags,
403			       int *prime_fd)
404{
405	struct drm_gem_object *obj;
406	int ret = 0;
407	struct dma_buf *dmabuf;
408
409	mutex_lock(&file_priv->prime.lock);
410	obj = drm_gem_object_lookup(dev, file_priv, handle);
411	if (!obj)  {
412		ret = -ENOENT;
413		goto out_unlock;
414	}
415
416	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
417	if (dmabuf) {
418		get_dma_buf(dmabuf);
419		goto out_have_handle;
420	}
421
422	mutex_lock(&dev->object_name_lock);
423	/* re-export the original imported object */
424	if (obj->import_attach) {
425		dmabuf = obj->import_attach->dmabuf;
426		get_dma_buf(dmabuf);
427		goto out_have_obj;
428	}
429
430	if (obj->dma_buf) {
431		get_dma_buf(obj->dma_buf);
432		dmabuf = obj->dma_buf;
433		goto out_have_obj;
434	}
435
436	dmabuf = export_and_register_object(dev, obj, flags);
437	if (IS_ERR(dmabuf)) {
438		/* normally the created dma-buf takes ownership of the ref,
439		 * but if that fails then drop the ref
440		 */
441		ret = PTR_ERR(dmabuf);
442		mutex_unlock(&dev->object_name_lock);
443		goto out;
444	}
445
446out_have_obj:
447	/*
448	 * If we've exported this buffer then cheat and add it to the import list
449	 * so we get the correct handle back. We must do this under the
450	 * protection of dev->object_name_lock to ensure that a racing gem close
451 * ioctl doesn't fail to remove this buffer handle from the cache.
452	 */
453	ret = drm_prime_add_buf_handle(&file_priv->prime,
454				       dmabuf, handle);
455	mutex_unlock(&dev->object_name_lock);
456	if (ret)
457		goto fail_put_dmabuf;
458
459out_have_handle:
460	ret = dma_buf_fd(dmabuf, flags);
461	/*
462	 * We must _not_ remove the buffer from the handle cache since the newly
463	 * created dma buf is already linked in the global obj->dma_buf pointer,
464	 * and that is invariant as long as a userspace gem handle exists.
465	 * Closing the handle will clean out the cache anyway, so we don't leak.
466	 */
467	if (ret < 0) {
468		goto fail_put_dmabuf;
469	} else {
470		*prime_fd = ret;
471		ret = 0;
472	}
473
474	goto out;
475
476fail_put_dmabuf:
477	dma_buf_put(dmabuf);
478out:
479	drm_gem_object_unreference_unlocked(obj);
480out_unlock:
481	mutex_unlock(&file_priv->prime.lock);
482
483	return ret;
484}
485EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
486
487/**
488 * drm_gem_prime_import - helper library implementation of the import callback
489 * @dev: drm_device to import into
490 * @dma_buf: dma-buf object to import
 
491 *
492 * This is the implementation of the gem_prime_import functions for GEM drivers
493 * using the PRIME helpers.
494 */
495struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
496					    struct dma_buf *dma_buf)
 
497{
498	struct dma_buf_attachment *attach;
499	struct sg_table *sgt;
500	struct drm_gem_object *obj;
501	int ret;
502
503	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
504		obj = dma_buf->priv;
505		if (obj->dev == dev) {
506			/*
507			 * Importing dmabuf exported from our own gem increases
508			 * refcount on gem itself instead of f_count of dmabuf.
509			 */
510			drm_gem_object_reference(obj);
511			return obj;
512		}
513	}
514
515	if (!dev->driver->gem_prime_import_sg_table)
516		return ERR_PTR(-EINVAL);
517
518	attach = dma_buf_attach(dma_buf, dev->dev);
519	if (IS_ERR(attach))
520		return ERR_CAST(attach);
521
522	get_dma_buf(dma_buf);
523
524	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
525	if (IS_ERR(sgt)) {
526		ret = PTR_ERR(sgt);
527		goto fail_detach;
528	}
529
530	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
531	if (IS_ERR(obj)) {
532		ret = PTR_ERR(obj);
533		goto fail_unmap;
534	}
535
536	obj->import_attach = attach;
 
537
538	return obj;
539
540fail_unmap:
541	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
542fail_detach:
543	dma_buf_detach(dma_buf, attach);
544	dma_buf_put(dma_buf);
545
546	return ERR_PTR(ret);
547}
548EXPORT_SYMBOL(drm_gem_prime_import);
549
550/**
551 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
552 * @dev: dev to export the buffer from
553 * @file_priv: drm file-private structure
554 * @prime_fd: fd id of the dma-buf which should be imported
555 * @handle: pointer to storage for the handle of the imported buffer object
556 *
557 * This is the PRIME import function which GEM drivers must use to ensure
558 * correct lifetime management of the underlying GEM object.
559 * The actual importing of the GEM object from the dma-buf is done through
560 * the gem_prime_import driver callback.
561 */
562int drm_gem_prime_fd_to_handle(struct drm_device *dev,
563			       struct drm_file *file_priv, int prime_fd,
564			       uint32_t *handle)
565{
566	struct dma_buf *dma_buf;
567	struct drm_gem_object *obj;
568	int ret;
569
570	dma_buf = dma_buf_get(prime_fd);
571	if (IS_ERR(dma_buf))
572		return PTR_ERR(dma_buf);
573
574	mutex_lock(&file_priv->prime.lock);
575
576	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
577			dma_buf, handle);
578	if (ret == 0)
579		goto out_put;
580
581	/* never seen this one, need to import */
582	mutex_lock(&dev->object_name_lock);
583	obj = dev->driver->gem_prime_import(dev, dma_buf);
584	if (IS_ERR(obj)) {
585		ret = PTR_ERR(obj);
586		goto out_unlock;
587	}
588
589	if (obj->dma_buf) {
590		WARN_ON(obj->dma_buf != dma_buf);
591	} else {
592		obj->dma_buf = dma_buf;
593		get_dma_buf(dma_buf);
594	}
595
596	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
597	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
598	drm_gem_object_unreference_unlocked(obj);
599	if (ret)
600		goto out_put;
601
602	ret = drm_prime_add_buf_handle(&file_priv->prime,
603			dma_buf, *handle);
604	if (ret)
605		goto fail;
606
607	mutex_unlock(&file_priv->prime.lock);
608
609	dma_buf_put(dma_buf);
610
611	return 0;
612
613fail:
614	/* hmm, if driver attached, we are relying on the free-object path
615	 * to detach.. which seems ok..
616	 */
617	drm_gem_handle_delete(file_priv, *handle);
618out_unlock:
619	mutex_unlock(&dev->object_name_lock);
620out_put:
621	dma_buf_put(dma_buf);
622	mutex_unlock(&file_priv->prime.lock);
623	return ret;
624}
625EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
626
627int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
628				 struct drm_file *file_priv)
629{
630	struct drm_prime_handle *args = data;
631
632	if (!drm_core_check_feature(dev, DRIVER_PRIME))
633		return -EINVAL;
634
635	if (!dev->driver->prime_handle_to_fd)
636		return -ENOSYS;
637
638	/* check flags are valid */
639	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
640		return -EINVAL;
641
642	return dev->driver->prime_handle_to_fd(dev, file_priv,
643			args->handle, args->flags, &args->fd);
644}
645
646int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
647				 struct drm_file *file_priv)
648{
649	struct drm_prime_handle *args = data;
650
651	if (!drm_core_check_feature(dev, DRIVER_PRIME))
652		return -EINVAL;
653
654	if (!dev->driver->prime_fd_to_handle)
655		return -ENOSYS;
656
657	return dev->driver->prime_fd_to_handle(dev, file_priv,
658			args->fd, &args->handle);
659}
 
660
661/**
662 * drm_prime_pages_to_sg - converts a page array into an sg list
663 * @pages: pointer to the array of page pointers to convert
664 * @nr_pages: length of the page vector
 
665 *
666 * This helper creates an sg table object from a set of pages. The
667 * driver is responsible for mapping the pages into the importer's
668 * address space for use with dma_buf itself.
669 */
670struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
671{
672	struct sg_table *sg = NULL;
673	int ret;
674
675	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
676	if (!sg) {
677		ret = -ENOMEM;
678		goto out;
679	}
680
681	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
682				nr_pages << PAGE_SHIFT, GFP_KERNEL);
683	if (ret)
684		goto out;
685
686	return sg;
687out:
688	kfree(sg);
689	return ERR_PTR(ret);
690}
691EXPORT_SYMBOL(drm_prime_pages_to_sg);
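/*
 * Illustrative sketch (not part of this file): a hypothetical
 * gem_prime_get_sg_table implementation for a driver that keeps a page
 * array in its GEM object; "foo_gem_object", "to_foo_obj" and the "pages"
 * member are made-up names.
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_obj(obj);
 *
 *		return drm_prime_pages_to_sg(foo_obj->pages,
 *					     obj->size >> PAGE_SHIFT);
 *	}
 */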
692
693/**
694 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
695 * @sgt: scatter-gather table to convert
696 * @pages: array of page pointers to store the page array in
697 * @addrs: optional array to store the dma bus address of each page
698 * @max_pages: size of both the passed-in arrays
 
699 *
700 * Exports an sg table into an array of pages and addresses. This is currently
701 * required by the TTM driver in order to do correct fault handling.
702 */
703int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
704				     dma_addr_t *addrs, int max_pages)
705{
706	unsigned count;
707	struct scatterlist *sg;
708	struct page *page;
709	u32 len;
710	int pg_index;
711	dma_addr_t addr;
712
713	pg_index = 0;
714	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
715		len = sg->length;
716		page = sg_page(sg);
717		addr = sg_dma_address(sg);
718
719		while (len > 0) {
720			if (WARN_ON(pg_index >= max_pages))
721				return -1;
722			pages[pg_index] = page;
723			if (addrs)
724				addrs[pg_index] = addr;
725
726			page++;
727			addr += PAGE_SIZE;
728			len -= PAGE_SIZE;
729			pg_index++;
730		}
731	}
732	return 0;
733}
734EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
735
736/**
737 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
738 * @obj: GEM object which was created from a dma-buf
739 * @sg: the sg-table which was pinned at import time
740 *
741 * This is the cleanup function which GEM drivers need to call when they use
742 * @drm_gem_prime_import to import dma-bufs.
743 */
744void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
745{
746	struct dma_buf_attachment *attach;
747	struct dma_buf *dma_buf;
 
748	attach = obj->import_attach;
749	if (sg)
750		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
751	dma_buf = attach->dmabuf;
752	dma_buf_detach(attach->dmabuf, attach);
753	/* remove the reference */
754	dma_buf_put(dma_buf);
755}
756EXPORT_SYMBOL(drm_prime_gem_destroy);
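/*
 * Illustrative sketch (not part of this file): a hypothetical
 * gem_free_object implementation showing where drm_prime_gem_destroy()
 * fits; the "foo_" names and the cached "sgt" member are made up.
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_obj(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, foo_obj->sgt);
 *
 *		drm_gem_object_release(obj);
 *		kfree(foo_obj);
 *	}
 */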
757
758void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
759{
760	INIT_LIST_HEAD(&prime_fpriv->head);
761	mutex_init(&prime_fpriv->lock);
762}
763
764void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
765{
766	/* by now drm_gem_release should've made sure the list is empty */
767	WARN_ON(!list_empty(&prime_fpriv->head));
768}
v6.13.7
   1/*
   2 * Copyright © 2012 Red Hat
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *      Dave Airlie <airlied@redhat.com>
  25 *      Rob Clark <rob.clark@linaro.org>
  26 *
  27 */
  28
  29#include <linux/export.h>
  30#include <linux/dma-buf.h>
  31#include <linux/rbtree.h>
  32#include <linux/module.h>
  33
  34#include <drm/drm.h>
  35#include <drm/drm_drv.h>
  36#include <drm/drm_file.h>
  37#include <drm/drm_framebuffer.h>
  38#include <drm/drm_gem.h>
  39#include <drm/drm_prime.h>
  40
  41#include "drm_internal.h"
  42
  43MODULE_IMPORT_NS("DMA_BUF");
  44
  45/**
  46 * DOC: overview and lifetime rules
  47 *
  48 * Similar to GEM global names, PRIME file descriptors are also used to share
  49 * buffer objects across processes. They offer additional security: as file
  50 * descriptors must be explicitly sent over UNIX domain sockets to be shared
  51 * between applications, they can't be guessed like the globally unique GEM
  52 * names.
  53 *
  54 * Drivers that support the PRIME API implement the drm_gem_object_funcs.export
  55 * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for
  56 * drivers are all individually exported for drivers which need to overwrite
  57 * or reimplement some of them.
  58 *
  59 * Reference Counting for GEM Drivers
  60 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  61 *
  62 * On the export the &dma_buf holds a reference to the exported buffer object,
  63 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
  64 * IOCTL, when it first calls &drm_gem_object_funcs.export
  65 * and stores the exporting GEM object in the &dma_buf.priv field. This
  66 * reference needs to be released when the final reference to the &dma_buf
  67 * itself is dropped and its &dma_buf_ops.release function is called.  For
  68 * GEM-based drivers, the &dma_buf should be exported using
  69 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
  70 *
  71 * Thus the chain of references always flows in one direction, avoiding loops:
  72 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
  73 * are the lookup caches for import and export. These are required to guarantee
  74 * that any given object will always have only one unique userspace handle. This
  75 * is required to allow userspace to detect duplicated imports, since some GEM
  76 * drivers do fail command submissions if a given buffer object is listed more
  77 * than once. These import and export caches in &drm_prime_file_private only
  78 * retain a weak reference, which is cleaned up when the corresponding object is
  79 * released.
  80 *
  81 * Self-importing: If userspace is using PRIME as a replacement for flink then
  82 * it will get a fd->handle request for a GEM object that it created.  Drivers
  83 * should detect this situation and return the underlying object from the
  84 * dma-buf private. For GEM based drivers this is handled in
  85 * drm_gem_prime_import() already.
  86 */
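/*
 * Illustrative sketch (not part of this file): a GEM driver participates in
 * this scheme through &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import. Both default to the generic helpers when
 * left unset; they are spelled out here for clarity. The "foo_" names are
 * hypothetical.
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free		= foo_gem_free,
 *		.export		= drm_gem_prime_export,
 *		.get_sg_table	= foo_gem_get_sg_table,
 *		.vmap		= foo_gem_vmap,
 *		.vunmap		= foo_gem_vunmap,
 *	};
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM,
 *		.gem_prime_import	= drm_gem_prime_import,
 *	};
 */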
  87
  88struct drm_prime_member {
 
  89	struct dma_buf *dma_buf;
  90	uint32_t handle;
 
  91
  92	struct rb_node dmabuf_rb;
  93	struct rb_node handle_rb;
 
  94};
  95
  96static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
  97				    struct dma_buf *dma_buf, uint32_t handle)
  98{
  99	struct drm_prime_member *member;
 100	struct rb_node **p, *rb;
 101
 102	member = kmalloc(sizeof(*member), GFP_KERNEL);
 103	if (!member)
 104		return -ENOMEM;
 105
 106	get_dma_buf(dma_buf);
 107	member->dma_buf = dma_buf;
 108	member->handle = handle;
 109
 110	rb = NULL;
 111	p = &prime_fpriv->dmabufs.rb_node;
 112	while (*p) {
 113		struct drm_prime_member *pos;
 114
 115		rb = *p;
 116		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
 117		if (dma_buf > pos->dma_buf)
 118			p = &rb->rb_right;
 119		else
 120			p = &rb->rb_left;
 121	}
 122	rb_link_node(&member->dmabuf_rb, rb, p);
 123	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
 124
 125	rb = NULL;
 126	p = &prime_fpriv->handles.rb_node;
 127	while (*p) {
 128		struct drm_prime_member *pos;
 129
 130		rb = *p;
 131		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
 132		if (handle > pos->handle)
 133			p = &rb->rb_right;
 134		else
 135			p = &rb->rb_left;
 136	}
 137	rb_link_node(&member->handle_rb, rb, p);
 138	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
 139
 140	return 0;
 141}
 142
 143static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
 144						      uint32_t handle)
 145{
 146	struct rb_node *rb;
 147
 148	rb = prime_fpriv->handles.rb_node;
 149	while (rb) {
 150		struct drm_prime_member *member;
 151
 152		member = rb_entry(rb, struct drm_prime_member, handle_rb);
 153		if (member->handle == handle)
 154			return member->dma_buf;
 155		else if (member->handle < handle)
 156			rb = rb->rb_right;
 157		else
 158			rb = rb->rb_left;
 159	}
 160
 161	return NULL;
 162}
 163
 164static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
 165				       struct dma_buf *dma_buf,
 166				       uint32_t *handle)
 167{
 168	struct rb_node *rb;
 169
 170	rb = prime_fpriv->dmabufs.rb_node;
 171	while (rb) {
 172		struct drm_prime_member *member;
 173
 174		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
 175		if (member->dma_buf == dma_buf) {
 176			*handle = member->handle;
 177			return 0;
 178		} else if (member->dma_buf < dma_buf) {
 179			rb = rb->rb_right;
 180		} else {
 181			rb = rb->rb_left;
 182		}
 183	}
 184
 185	return -ENOENT;
 186}
 187
 188void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
 189				 uint32_t handle)
 
 190{
 191	struct rb_node *rb;
 192
 193	mutex_lock(&prime_fpriv->lock);
 194
 195	rb = prime_fpriv->handles.rb_node;
 196	while (rb) {
 197		struct drm_prime_member *member;
 198
 199		member = rb_entry(rb, struct drm_prime_member, handle_rb);
 200		if (member->handle == handle) {
 201			rb_erase(&member->handle_rb, &prime_fpriv->handles);
 202			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
 203
 204			dma_buf_put(member->dma_buf);
 205			kfree(member);
 206			break;
 207		} else if (member->handle < handle) {
 208			rb = rb->rb_right;
 209		} else {
 210			rb = rb->rb_left;
 211		}
 212	}
 213
 214	mutex_unlock(&prime_fpriv->lock);
 215}
 216
 217void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
 
 218{
 219	mutex_init(&prime_fpriv->lock);
 220	prime_fpriv->dmabufs = RB_ROOT;
 221	prime_fpriv->handles = RB_ROOT;
 222}
 223
 224void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 
 225{
 226	/* by now drm_gem_release should've made sure the list is empty */
 227	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
 228}
 229
 230/**
 231 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 232 * @dev: parent device for the exported dmabuf
 233 * @exp_info: the export information used by dma_buf_export()
 234 *
 235 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 236 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 237 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 238 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 239 *
 240 * Returns the new dmabuf.
 241 */
 242struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
 243				      struct dma_buf_export_info *exp_info)
 244{
 245	struct drm_gem_object *obj = exp_info->priv;
 246	struct dma_buf *dma_buf;
 247
 248	dma_buf = dma_buf_export(exp_info);
 249	if (IS_ERR(dma_buf))
 250		return dma_buf;
 251
 252	drm_dev_get(dev);
 253	drm_gem_object_get(obj);
 254	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
 255
 256	return dma_buf;
 257}
 258EXPORT_SYMBOL(drm_gem_dmabuf_export);
 259
 260/**
 261 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 262 * @dma_buf: buffer to be released
 263 *
 264 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 265 * must use this in their &dma_buf_ops structure as the release callback.
 266 * drm_gem_dmabuf_release() should be used in conjunction with
 267 * drm_gem_dmabuf_export().
 268 */
 269void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 270{
 271	struct drm_gem_object *obj = dma_buf->priv;
 272	struct drm_device *dev = obj->dev;
 273
 274	/* drop the reference on the export fd holds */
 275	drm_gem_object_put(obj);
 276
 277	drm_dev_put(dev);
 278}
 279EXPORT_SYMBOL(drm_gem_dmabuf_release);
 280
 281/**
 282 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 283 * @dev: drm_device to import into
 284 * @file_priv: drm file-private structure
 285 * @prime_fd: fd id of the dma-buf which should be imported
 286 * @handle: pointer to storage for the handle of the imported buffer object
 287 *
 288 * This is the PRIME import function which GEM drivers must use to ensure
 289 * correct lifetime management of the underlying GEM object.
 290 * The actual importing of the GEM object from the dma-buf is done through the
 291 * &drm_driver.gem_prime_import driver callback.
 292 *
 293 * Returns 0 on success or a negative error code on failure.
 294 */
 295int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 296			       struct drm_file *file_priv, int prime_fd,
 297			       uint32_t *handle)
 298{
 299	struct dma_buf *dma_buf;
 300	struct drm_gem_object *obj;
 301	int ret;
 302
 303	dma_buf = dma_buf_get(prime_fd);
 304	if (IS_ERR(dma_buf))
 305		return PTR_ERR(dma_buf);
 306
 307	mutex_lock(&file_priv->prime.lock);
 308
 309	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
 310			dma_buf, handle);
 311	if (ret == 0)
 312		goto out_put;
 313
 314	/* never seen this one, need to import */
 315	mutex_lock(&dev->object_name_lock);
 316	if (dev->driver->gem_prime_import)
 317		obj = dev->driver->gem_prime_import(dev, dma_buf);
 318	else
 319		obj = drm_gem_prime_import(dev, dma_buf);
 320	if (IS_ERR(obj)) {
 321		ret = PTR_ERR(obj);
 322		goto out_unlock;
 323	}
 324
 325	if (obj->dma_buf) {
 326		WARN_ON(obj->dma_buf != dma_buf);
 327	} else {
 328		obj->dma_buf = dma_buf;
 329		get_dma_buf(dma_buf);
 330	}
 331
 332	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
 333	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
 334	drm_gem_object_put(obj);
 335	if (ret)
 336		goto out_put;
 
 337
 338	ret = drm_prime_add_buf_handle(&file_priv->prime,
 339			dma_buf, *handle);
 340	mutex_unlock(&file_priv->prime.lock);
 341	if (ret)
 342		goto fail;
 343
 344	dma_buf_put(dma_buf);
 345
 346	return 0;
 347
 348fail:
 349	/* hmm, if driver attached, we are relying on the free-object path
 350	 * to detach.. which seems ok..
 351	 */
 352	drm_gem_handle_delete(file_priv, *handle);
 353	dma_buf_put(dma_buf);
 354	return ret;
 355
 356out_unlock:
 357	mutex_unlock(&dev->object_name_lock);
 358out_put:
 359	mutex_unlock(&file_priv->prime.lock);
 360	dma_buf_put(dma_buf);
 361	return ret;
 362}
 363EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
 364
 365int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 366				 struct drm_file *file_priv)
 367{
 368	struct drm_prime_handle *args = data;
 369
 370	if (dev->driver->prime_fd_to_handle) {
 371		return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
 372						       &args->handle);
 373	}
 374
 375	return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
 376}
 
 377
 378static struct dma_buf *export_and_register_object(struct drm_device *dev,
 379						  struct drm_gem_object *obj,
 380						  uint32_t flags)
 381{
 382	struct dma_buf *dmabuf;
 383
 384	/* prevent races with concurrent gem_close. */
 385	if (obj->handle_count == 0) {
 386		dmabuf = ERR_PTR(-ENOENT);
 387		return dmabuf;
 388	}
 389
 390	if (obj->funcs && obj->funcs->export)
 391		dmabuf = obj->funcs->export(obj, flags);
 392	else
 393		dmabuf = drm_gem_prime_export(obj, flags);
 394	if (IS_ERR(dmabuf)) {
 395		/* normally the created dma-buf takes ownership of the ref,
 396		 * but if that fails then drop the ref
 397		 */
 398		return dmabuf;
 399	}
 400
 401	/*
 402	 * Note that callers do not need to clean up the export cache
 403	 * since the check for obj->handle_count guarantees that someone
 404	 * will clean it up.
 405	 */
 406	obj->dma_buf = dmabuf;
 407	get_dma_buf(obj->dma_buf);
 408
 409	return dmabuf;
 410}
 411
 412/**
 413 * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers
 414 * @dev: dev to export the buffer from
 415 * @file_priv: drm file-private structure
 416 * @handle: buffer handle to export
 417 * @flags: flags like DRM_CLOEXEC
 
 418 *
 419 * This is the PRIME export function which GEM drivers must use to ensure
 420 * correct lifetime management of the underlying GEM object.
 421 * The actual exporting from GEM object to a dma-buf is done through the
 422 * &drm_gem_object_funcs.export callback.
 423 *
 424 * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it
 425 * has created, without attaching it to any file descriptors.  The difference
 426 * between those two is similar to that between anon_inode_getfile() and
 427 * anon_inode_getfd(); insertion into descriptor table is something you
 428 * can not revert if any cleanup is needed, so the descriptor-returning
 429 * variants should only be used when you are past the last failure exit
 430 * and the only thing left is passing the new file descriptor to userland.
 431 * When all you need is the object itself or when you need to do something
 432 * else that might fail, use that one instead.
 433 */
 434struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
 435			       struct drm_file *file_priv, uint32_t handle,
 436			       uint32_t flags)
 
 437{
 438	struct drm_gem_object *obj;
 439	int ret = 0;
 440	struct dma_buf *dmabuf;
 441
 442	mutex_lock(&file_priv->prime.lock);
 443	obj = drm_gem_object_lookup(file_priv, handle);
 444	if (!obj)  {
 445		dmabuf = ERR_PTR(-ENOENT);
 446		goto out_unlock;
 447	}
 448
 449	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
 450	if (dmabuf) {
 451		get_dma_buf(dmabuf);
 452		goto out;
 453	}
 454
 455	mutex_lock(&dev->object_name_lock);
 456	/* re-export the original imported object */
 457	if (obj->import_attach) {
 458		dmabuf = obj->import_attach->dmabuf;
 459		get_dma_buf(dmabuf);
 460		goto out_have_obj;
 461	}
 462
 463	if (obj->dma_buf) {
 464		get_dma_buf(obj->dma_buf);
 465		dmabuf = obj->dma_buf;
 466		goto out_have_obj;
 467	}
 468
 469	dmabuf = export_and_register_object(dev, obj, flags);
 470	if (IS_ERR(dmabuf)) {
 471		/* normally the created dma-buf takes ownership of the ref,
 472		 * but if that fails then drop the ref
 473		 */
 
 474		mutex_unlock(&dev->object_name_lock);
 475		goto out;
 476	}
 477
 478out_have_obj:
 479	/*
 480	 * If we've exported this buffer then cheat and add it to the import list
 481	 * so we get the correct handle back. We must do this under the
 482	 * protection of dev->object_name_lock to ensure that a racing gem close
 483 * ioctl doesn't fail to remove this buffer handle from the cache.
 484	 */
 485	ret = drm_prime_add_buf_handle(&file_priv->prime,
 486				       dmabuf, handle);
 487	mutex_unlock(&dev->object_name_lock);
 488	if (ret) {
 489		dma_buf_put(dmabuf);
 490		dmabuf = ERR_PTR(ret);
 491	}
 492out:
 493	drm_gem_object_put(obj);
 494out_unlock:
 495	mutex_unlock(&file_priv->prime.lock);
 496	return dmabuf;
 497}
 498EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf);
 499
 500/**
 501 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 502 * @dev: dev to export the buffer from
 503 * @file_priv: drm file-private structure
 504 * @handle: buffer handle to export
 505 * @flags: flags like DRM_CLOEXEC
 506 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 507 *
 508 * This is the PRIME export function which GEM drivers must use to ensure
 509 * correct lifetime management of the underlying GEM object.
 510 * The actual exporting from GEM object to a dma-buf is done through the
 511 * &drm_gem_object_funcs.export callback.
 512 */
 513int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 514			       struct drm_file *file_priv, uint32_t handle,
 515			       uint32_t flags,
 516			       int *prime_fd)
 517{
 518	struct dma_buf *dmabuf;
 519	int fd = get_unused_fd_flags(flags);
 520
 521	if (fd < 0)
 522		return fd;
 523
 524	dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags);
 525	if (IS_ERR(dmabuf)) {
 526		put_unused_fd(fd);
 527		return PTR_ERR(dmabuf);
 528	}
 529
 530	fd_install(fd, dmabuf->file);
 531	*prime_fd = fd;
 532	return 0;
 533}
 534EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
 535
 536int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 537				 struct drm_file *file_priv)
 538{
 539	struct drm_prime_handle *args = data;
 540
 541	/* check flags are valid */
 542	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
 543		return -EINVAL;
 544
 545	if (dev->driver->prime_handle_to_fd) {
 546		return dev->driver->prime_handle_to_fd(dev, file_priv,
 547						       args->handle, args->flags,
 548						       &args->fd);
 549	}
 550	return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
 551					  args->flags, &args->fd);
 552}
 553
 554/**
 555 * DOC: PRIME Helpers
 556 *
 557 * Drivers can implement &drm_gem_object_funcs.export and
 558 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 559 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 560 * implement dma-buf support in terms of some lower-level helpers, which are
 561 * again exported for drivers to use individually:
 562 *
 563 * Exporting buffers
 564 * ~~~~~~~~~~~~~~~~~
 565 *
 566 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 567 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 568 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 569 * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
 570 * unimplemented, exports into another device are rejected.
 571 *
 572 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 573 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 574 * drm_gem_dmabuf_mmap().
 575 *
 576 * Note that these export helpers can only be used if the underlying backing
 577 * storage is fully coherent and either permanently pinned, or it is safe to pin
 578 * it indefinitely.
 579 *
 580 * FIXME: The underlying helper functions are named rather inconsistently.
 581 *
 582 * Importing buffers
 583 * ~~~~~~~~~~~~~~~~~
 584 *
 585 * Importing dma-bufs using drm_gem_prime_import() relies on
 586 * &drm_driver.gem_prime_import_sg_table.
 587 *
 588 * Note that similarly to the export helpers this permanently pins the
 589 * underlying backing storage. Which is ok for scanout, but is not the best
 590 * option for sharing lots of buffers for rendering.
 591 */
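/*
 * Illustrative sketch (not part of this file): a hypothetical driver that
 * needs its own &dma_buf_ops (for example to add a begin_cpu_access hook)
 * can still reuse the individually exported helpers below. The "foo_"
 * names are made up.
 *
 *	static const struct dma_buf_ops foo_dmabuf_ops = {
 *		.cache_sgt_mapping	= true,
 *		.attach			= drm_gem_map_attach,
 *		.detach			= drm_gem_map_detach,
 *		.map_dma_buf		= drm_gem_map_dma_buf,
 *		.unmap_dma_buf		= drm_gem_unmap_dma_buf,
 *		.release		= drm_gem_dmabuf_release,
 *		.begin_cpu_access	= foo_dmabuf_begin_cpu_access,
 *		.mmap			= drm_gem_dmabuf_mmap,
 *		.vmap			= drm_gem_dmabuf_vmap,
 *		.vunmap			= drm_gem_dmabuf_vunmap,
 *	};
 */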
 592
 593/**
 594 * drm_gem_map_attach - dma_buf attach implementation for GEM
 595 * @dma_buf: buffer to attach device to
 596 * @attach: buffer attachment data
 597 *
 598 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 599 * used as the &dma_buf_ops.attach callback. Must be used together with
 600 * drm_gem_map_detach().
 601 *
 602 * Returns 0 on success, negative error code on failure.
 603 */
 604int drm_gem_map_attach(struct dma_buf *dma_buf,
 605		       struct dma_buf_attachment *attach)
 606{
 607	struct drm_gem_object *obj = dma_buf->priv;
 608
 609	/*
 610	 * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
 611	 * that implement their own ->map_dma_buf() do not.
 612	 */
 613	if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
 614	    !obj->funcs->get_sg_table)
 615		return -ENOSYS;
 616
 617	return drm_gem_pin(obj);
 618}
 619EXPORT_SYMBOL(drm_gem_map_attach);
 620
 621/**
 622 * drm_gem_map_detach - dma_buf detach implementation for GEM
 623 * @dma_buf: buffer to detach from
 624 * @attach: attachment to be detached
 625 *
 626 * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
 627 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 628 * &dma_buf_ops.detach callback.
 629 */
 630void drm_gem_map_detach(struct dma_buf *dma_buf,
 631			struct dma_buf_attachment *attach)
 632{
 633	struct drm_gem_object *obj = dma_buf->priv;
 634
 635	drm_gem_unpin(obj);
 636}
 637EXPORT_SYMBOL(drm_gem_map_detach);
 638
 639/**
 640 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 641 * @attach: attachment whose scatterlist is to be returned
 642 * @dir: direction of DMA transfer
 643 *
 644 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 645 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 646 * with drm_gem_unmap_dma_buf().
 647 *
 648 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 649 * on error. May return -EINTR if it is interrupted by a signal.
 650 */
 651struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 652				     enum dma_data_direction dir)
 653{
 654	struct drm_gem_object *obj = attach->dmabuf->priv;
 655	struct sg_table *sgt;
 656	int ret;
 657
 658	if (WARN_ON(dir == DMA_NONE))
 659		return ERR_PTR(-EINVAL);
 660
 661	if (WARN_ON(!obj->funcs->get_sg_table))
 662		return ERR_PTR(-ENOSYS);
 663
 664	sgt = obj->funcs->get_sg_table(obj);
 665	if (IS_ERR(sgt))
 666		return sgt;
 667
 668	ret = dma_map_sgtable(attach->dev, sgt, dir,
 669			      DMA_ATTR_SKIP_CPU_SYNC);
 670	if (ret) {
 671		sg_free_table(sgt);
 672		kfree(sgt);
 673		sgt = ERR_PTR(ret);
 674	}
 675
 676	return sgt;
 677}
 678EXPORT_SYMBOL(drm_gem_map_dma_buf);
 679
 680/**
 681 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 682 * @attach: attachment to unmap buffer from
 683 * @sgt: scatterlist info of the buffer to unmap
 684 * @dir: direction of DMA transfer
 685 *
 686 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 687 */
 688void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 689			   struct sg_table *sgt,
 690			   enum dma_data_direction dir)
 691{
 692	if (!sgt)
 693		return;
 694
 695	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
 696	sg_free_table(sgt);
 697	kfree(sgt);
 698}
 699EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
 700
 701/**
 702 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 703 * @dma_buf: buffer to be mapped
 704 * @map: the virtual address of the buffer
 705 *
 706 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 707 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 708 * The kernel virtual address is returned in map.
 709 *
 710 * Returns 0 on success or a negative errno code otherwise.
 711 */
 712int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
 713{
 714	struct drm_gem_object *obj = dma_buf->priv;
 715
 716	return drm_gem_vmap(obj, map);
 717}
 718EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
 719
 720/**
 721 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 722 * @dma_buf: buffer to be unmapped
 723 * @map: the virtual address of the buffer
 724 *
 725 * Releases a kernel virtual mapping. This can be used as the
 726 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
 727 */
 728void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
 729{
 730	struct drm_gem_object *obj = dma_buf->priv;
 731
 732	drm_gem_vunmap(obj, map);
 733}
 734EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 735
 736/**
 737 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 738 * @obj: GEM object
 739 * @vma: Virtual address range
 740 *
 741 * This function sets up a userspace mapping for PRIME exported buffers using
 742 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 743 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 744 * called to set up the mapping.
 745 */
 746int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 747{
 748	struct drm_file *priv;
 749	struct file *fil;
 750	int ret;
 751
 752	/* Add the fake offset */
 753	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
 754
 755	if (obj->funcs && obj->funcs->mmap) {
 756		vma->vm_ops = obj->funcs->vm_ops;
 757
 758		drm_gem_object_get(obj);
 759		ret = obj->funcs->mmap(obj, vma);
 760		if (ret) {
 761			drm_gem_object_put(obj);
 762			return ret;
 763		}
 764		vma->vm_private_data = obj;
 765		return 0;
 766	}
 767
 768	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 769	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
 770	if (!priv || !fil) {
 771		ret = -ENOMEM;
 772		goto out;
 773	}
 774
 775	/* Used by drm_gem_mmap() to lookup the GEM object */
 776	priv->minor = obj->dev->primary;
 777	fil->private_data = priv;
 778
 779	ret = drm_vma_node_allow(&obj->vma_node, priv);
 780	if (ret)
 781		goto out;
 782
 783	ret = obj->dev->driver->fops->mmap(fil, vma);
 784
 785	drm_vma_node_revoke(&obj->vma_node, priv);
 786out:
 787	kfree(priv);
 788	kfree(fil);
 
 789
 790	return ret;
 791}
 792EXPORT_SYMBOL(drm_gem_prime_mmap);
 793
 794/**
 795 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 796 * @dma_buf: buffer to be mapped
 797 * @vma: virtual address range
 798 *
 799 * Provides memory mapping for the buffer. This can be used as the
 800 * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
 801 *
 802 * Returns 0 on success or a negative error code on failure.
 803 */
 804int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 805{
 806	struct drm_gem_object *obj = dma_buf->priv;
 807
 808	return drm_gem_prime_mmap(obj, vma);
 809}
 810EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
 811
 812static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 813	.cache_sgt_mapping = true,
 814	.attach = drm_gem_map_attach,
 815	.detach = drm_gem_map_detach,
 816	.map_dma_buf = drm_gem_map_dma_buf,
 817	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 818	.release = drm_gem_dmabuf_release,
 819	.mmap = drm_gem_dmabuf_mmap,
 820	.vmap = drm_gem_dmabuf_vmap,
 821	.vunmap = drm_gem_dmabuf_vunmap,
 822};
 823
 824/**
 825 * drm_prime_pages_to_sg - converts a page array into an sg list
 826 * @dev: DRM device
 827 * @pages: pointer to the array of page pointers to convert
 828 * @nr_pages: length of the page vector
 829 *
 830 * This helper creates an sg table object from a set of pages. The
 831 * driver is responsible for mapping the pages into the importer's
 832 * address space for use with dma_buf itself.
 833 *
 834 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 835 */
 836struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 837				       struct page **pages, unsigned int nr_pages)
 838{
 839	struct sg_table *sg;
 840	size_t max_segment = 0;
 841	int err;
 842
 843	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 844	if (!sg)
 845		return ERR_PTR(-ENOMEM);
 846
 847	if (dev)
 848		max_segment = dma_max_mapping_size(dev->dev);
 849	if (max_segment == 0)
 850		max_segment = UINT_MAX;
 851	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
 852						(unsigned long)nr_pages << PAGE_SHIFT,
 853						max_segment, GFP_KERNEL);
 854	if (err) {
 855		kfree(sg);
 856		sg = ERR_PTR(err);
 857	}
 858	return sg;
 859}
 860EXPORT_SYMBOL(drm_prime_pages_to_sg);
 861
 862/**
 863 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 864 * @sgt: sg_table describing the buffer to check
 865 *
 866 * This helper calculates the contiguous size in the DMA address space
 867 * of the buffer described by the provided sg_table.
 868 *
 869 * This is useful for implementing
 870 * &drm_driver.gem_prime_import_sg_table.
 871 */
 872unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
 873{
 874	dma_addr_t expected = sg_dma_address(sgt->sgl);
 875	struct scatterlist *sg;
 876	unsigned long size = 0;
 877	int i;
 878
 879	for_each_sgtable_dma_sg(sgt, sg, i) {
 880		unsigned int len = sg_dma_len(sg);
 881
 882		if (!len)
 883			break;
 884		if (sg_dma_address(sg) != expected)
 885			break;
 886		expected += len;
 887		size += len;
 888	}
 889	return size;
 890}
 891EXPORT_SYMBOL(drm_prime_get_contiguous_size);
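/*
 * Illustrative sketch (not part of this file): a hypothetical
 * &drm_driver.gem_prime_import_sg_table implementation for hardware that
 * can only deal with contiguous buffers; "foo_gem_create_from_sgt" is a
 * made-up driver function.
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev,
 *				      struct dma_buf_attachment *attach,
 *				      struct sg_table *sgt)
 *	{
 *		size_t size = attach->dmabuf->size;
 *
 *		if (drm_prime_get_contiguous_size(sgt) < size)
 *			return ERR_PTR(-EINVAL);
 *
 *		return foo_gem_create_from_sgt(dev, sgt, size);
 *	}
 */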
 892
 893/**
 894 * drm_gem_prime_export - helper library implementation of the export callback
 895 * @obj: GEM object to export
 896 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 897 *
 898 * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
 899 * using the PRIME helpers. It is used as the default in
 900 * drm_gem_prime_handle_to_fd().
 901 */
 902struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
 903				     int flags)
 904{
 905	struct drm_device *dev = obj->dev;
 906	struct dma_buf_export_info exp_info = {
 907		.exp_name = KBUILD_MODNAME, /* white lie for debug */
 908		.owner = dev->driver->fops->owner,
 909		.ops = &drm_gem_prime_dmabuf_ops,
 910		.size = obj->size,
 911		.flags = flags,
 912		.priv = obj,
 913		.resv = obj->resv,
 914	};
 915
 916	return drm_gem_dmabuf_export(dev, &exp_info);
 917}
 918EXPORT_SYMBOL(drm_gem_prime_export);
 919
 920/**
 921 * drm_gem_prime_import_dev - core implementation of the import callback
 922 * @dev: drm_device to import into
 923 * @dma_buf: dma-buf object to import
 924 * @attach_dev: struct device to dma_buf attach
 925 *
 926 * This is the core of drm_gem_prime_import(). It's designed to be called by
 927 * drivers who want to use a different device structure than &drm_device.dev for
 928 * attaching via dma_buf. This function calls
 929 * &drm_driver.gem_prime_import_sg_table internally.
 930 *
 931 * Drivers must arrange to call drm_prime_gem_destroy() from their
 932 * &drm_gem_object_funcs.free hook when using this function.
 933 */
 934struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
 935					    struct dma_buf *dma_buf,
 936					    struct device *attach_dev)
 937{
 938	struct dma_buf_attachment *attach;
 939	struct sg_table *sgt;
 940	struct drm_gem_object *obj;
 941	int ret;
 942
 943	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
 944		obj = dma_buf->priv;
 945		if (obj->dev == dev) {
 946			/*
 947			 * Importing dmabuf exported from our own gem increases
 948			 * refcount on gem itself instead of f_count of dmabuf.
 949			 */
 950			drm_gem_object_get(obj);
 951			return obj;
 952		}
 953	}
 954
 955	if (!dev->driver->gem_prime_import_sg_table)
 956		return ERR_PTR(-EINVAL);
 957
 958	attach = dma_buf_attach(dma_buf, attach_dev);
 959	if (IS_ERR(attach))
 960		return ERR_CAST(attach);
 961
 962	get_dma_buf(dma_buf);
 963
 964	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 965	if (IS_ERR(sgt)) {
 966		ret = PTR_ERR(sgt);
 967		goto fail_detach;
 968	}
 969
 970	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 971	if (IS_ERR(obj)) {
 972		ret = PTR_ERR(obj);
 973		goto fail_unmap;
 974	}
 975
 976	obj->import_attach = attach;
 977	obj->resv = dma_buf->resv;
 978
 979	return obj;
 980
 981fail_unmap:
 982	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 983fail_detach:
 984	dma_buf_detach(dma_buf, attach);
 985	dma_buf_put(dma_buf);
 986
 987	return ERR_PTR(ret);
 988}
 989EXPORT_SYMBOL(drm_gem_prime_import_dev);
 990
 991/**
 992 * drm_gem_prime_import - helper library implementation of the import callback
 993 * @dev: drm_device to import into
 994 * @dma_buf: dma-buf object to import
 995 *
 996 * This is the implementation of the gem_prime_import functions for GEM drivers
 997 * using the PRIME helpers. Drivers can use this as their
 998 * &drm_driver.gem_prime_import implementation. It is used as the default
 999 * implementation in drm_gem_prime_fd_to_handle().
1000 *
1001 * Drivers must arrange to call drm_prime_gem_destroy() from their
1002 * &drm_gem_object_funcs.free hook when using this function.
1003 */
1004struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1005					    struct dma_buf *dma_buf)
1006{
1007	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
1008}
1009EXPORT_SYMBOL(drm_gem_prime_import);
1010
1011/**
1012 * drm_prime_sg_to_page_array - convert an sg table into a page array
1013 * @sgt: scatter-gather table to convert
1014 * @pages: array of page pointers to store the pages in
1015 * @max_entries: size of the passed-in array
1016 *
1017 * Exports an sg table into an array of pages.
1018 *
1019 * This function is deprecated and its use is strongly discouraged.
1020 * The page array is only useful for page faults and those can corrupt fields
1021 * in the struct page if they are not handled by the exporting driver.
1022 */
1023int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
1024					    struct page **pages,
1025					    int max_entries)
1026{
1027	struct sg_page_iter page_iter;
1028	struct page **p = pages;
1029
1030	for_each_sgtable_page(sgt, &page_iter, 0) {
1031		if (WARN_ON(p - pages >= max_entries))
1032			return -1;
1033		*p++ = sg_page_iter_page(&page_iter);
1034	}
1035	return 0;
1036}
1037EXPORT_SYMBOL(drm_prime_sg_to_page_array);
1038
1039/**
1040 * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
1041 * @sgt: scatter-gather table to convert
1042 * @addrs: array to store the dma bus address of each page
1043 * @max_entries: size of both the passed-in arrays
1044 *
1045 * Exports an sg table into an array of addresses.
1046 *
1047 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
1048 * implementation.
1049 */
1050int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
1051				   int max_entries)
1052{
1053	struct sg_dma_page_iter dma_iter;
1054	dma_addr_t *a = addrs;
1055
1056	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
1057		if (WARN_ON(a - addrs >= max_entries))
1058			return -1;
1059		*a++ = sg_page_iter_dma_address(&dma_iter);
1060	}
1061	return 0;
1062}
1063EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
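/*
 * Illustrative sketch (not part of this file): a hypothetical importer
 * copying the per-page DMA addresses out of the sg table, e.g. to build a
 * private page-table format; the "foo_" names are made up.
 *
 *	static int foo_import_addrs(struct foo_gem_object *foo_obj,
 *				    struct sg_table *sgt)
 *	{
 *		unsigned int npages = foo_obj->base.size >> PAGE_SHIFT;
 *
 *		foo_obj->dma_addrs = kvmalloc_array(npages, sizeof(dma_addr_t),
 *						    GFP_KERNEL);
 *		if (!foo_obj->dma_addrs)
 *			return -ENOMEM;
 *
 *		return drm_prime_sg_to_dma_addr_array(sgt, foo_obj->dma_addrs,
 *						      npages);
 *	}
 */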
1064
1065/**
1066 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
1067 * @obj: GEM object which was created from a dma-buf
1068 * @sg: the sg-table which was pinned at import time
1069 *
1070 * This is the cleanup function which GEM drivers need to call when they use
1071 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
1072 */
1073void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
1074{
1075	struct dma_buf_attachment *attach;
1076	struct dma_buf *dma_buf;
1077
1078	attach = obj->import_attach;
1079	if (sg)
1080		dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
1081	dma_buf = attach->dmabuf;
1082	dma_buf_detach(attach->dmabuf, attach);
1083	/* remove the reference */
1084	dma_buf_put(dma_buf);
1085}
1086EXPORT_SYMBOL(drm_prime_gem_destroy);
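/*
 * Illustrative sketch (not part of this file): a hypothetical
 * &drm_gem_object_funcs.free implementation showing where
 * drm_prime_gem_destroy() is called for imported objects; the "foo_"
 * names and the cached "sgt" member are made up.
 *
 *	static void foo_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_obj(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, foo_obj->sgt);
 *
 *		drm_gem_object_release(obj);
 *		kfree(foo_obj);
 *	}
 */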