v6.13.7
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


#define pr_fmt(fmt) "[TTM] " fmt

#include "ttm_object.h"
#include "vmwgfx_drv.h"

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/hashtable.h>

MODULE_IMPORT_NS("DMA_BUF");

#define VMW_TTM_OBJECT_REF_HT_ORDER 10

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash table.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash table of ref objects, for fast lookup of
 * ref objects given a base object.
 *
 * @refcount: reference/usage count
 */
struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
	struct kref refcount;
};

/*
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the idr.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	spinlock_t object_lock;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	struct idr idr;
};

/*
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct vmwgfx_hash_item hash;
	struct list_head head;
	struct kref kref;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
				  uint64_t key,
				  struct vmwgfx_hash_item **p_hash)
{
	struct vmwgfx_hash_item *hash;

	hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
		if (hash->key == key) {
			*p_hash = hash;
			return 0;
		}
	}
	return -EINVAL;
}

static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
			      uint64_t key,
			      struct vmwgfx_hash_item **p_hash)
{
	struct vmwgfx_hash_item *hash;

	hash_for_each_possible(tfile->ref_hash, hash, head, key) {
		if (hash->key == key) {
			*p_hash = hash;
			return 0;
		}
	}
	return -EINVAL;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	idr_preload(GFP_KERNEL);
	spin_lock(&tdev->object_lock);
	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
	spin_unlock(&tdev->object_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	base->handle = ret;
	ret = ttm_ref_object_add(tfile, base, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
	return ret;
}
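
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * object typically embeds a struct ttm_base_object and registers it as
 * below. The struct and function names are hypothetical; the matching
 * release callback is sketched after ttm_release_base() below.
 *
 *	struct vmw_example_obj {
 *		struct ttm_base_object base;
 *		// driver-private payload ...
 *	};
 *
 *	int vmw_example_obj_register(struct ttm_object_file *tfile,
 *				     struct vmw_example_obj *obj)
 *	{
 *		// On success, the handle is available as obj->base.handle.
 *		return ttm_base_object_init(tfile, &obj->base, false,
 *					    ttm_driver_type0, // assumed type
 *					    vmw_example_release);
 *	}
 */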

static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
	    container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}
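
/*
 * Per the note above, a refcount_release implementation must delay the
 * actual kfree() until after an RCU grace period, since lookups may still
 * dereference the object under rcu_read_lock(). A minimal sketch for the
 * hypothetical object from the earlier example, using the
 * ttm_base_object_kfree() helper from ttm_object.h:
 *
 *	static void vmw_example_release(struct ttm_base_object **p_base)
 *	{
 *		struct vmw_example_obj *obj =
 *			container_of(*p_base, struct vmw_example_obj, base);
 *
 *		*p_base = NULL;
 *		ttm_base_object_kfree(obj, base);
 *	}
 */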

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint64_t key)
{
	struct ttm_base_object *base = NULL;
	struct vmwgfx_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = ttm_tfile_find_ref(tfile, key, &hash);

	if (likely(ret == 0)) {
		base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	spin_unlock(&tfile->lock);

	return base;
}
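
/*
 * Usage sketch (illustrative, not part of the original file): a successful
 * lookup returns a refcounted pointer that must be balanced with
 * ttm_base_object_unref(), which also clears the caller's pointer.
 *
 *	struct ttm_base_object *base;
 *
 *	base = ttm_base_object_lookup(tfile, handle);
 *	if (unlikely(!base))
 *		return -ESRCH;
 *	// ... use base ...
 *	ttm_base_object_unref(&base);	// base is NULL afterwards
 */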

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
{
	struct ttm_base_object *base;

	rcu_read_lock();
	base = idr_find(&tdev->idr, key);

	if (base && !kref_get_unless_zero(&base->refcount))
		base = NULL;
	rcu_read_unlock();

	return base;
}

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       bool *existed,
		       bool require_existed)
{
	struct ttm_ref_object *ref;
	struct vmwgfx_hash_item *hash;
	int ret = -EINVAL;

	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);

		if (ret == 0) {
			ref = hlist_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL))
			return -ENOMEM;

		ref->hash.key = base->handle;
		ref->obj = base;
		ref->tfile = tfile;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
		ret = 0;

		list_add_tail(&ref->head, &tfile->ref_list);
		kref_get(&base->refcount);
		spin_unlock(&tfile->lock);
		if (existed != NULL)
			*existed = false;
	}

	return ret;
}
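
/*
 * Usage sketch (illustrative): when publishing a handle to user-space, the
 * @existed flag tells the caller whether this call created the first
 * reference, so error unwinding only drops what was actually added. The
 * follow-up step below is a hypothetical name.
 *
 *	bool existed;
 *	int ret;
 *
 *	ret = ttm_ref_object_add(tfile, base, &existed, false);
 *	if (ret)
 *		return ret;
 *	ret = copy_handle_to_user(...);	// hypothetical follow-up step
 *	if (ret && !existed)
 *		ttm_ref_object_base_unref(tfile, base->handle);
 */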

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_object_file *tfile = ref->tfile;

	hash_del_rcu(&ref->hash.head);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	ttm_base_object_unref(&ref->obj);
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key)
{
	struct ttm_ref_object *ref;
	struct vmwgfx_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = ttm_tfile_find_ref(tfile, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = hlist_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	spin_unlock(&tfile->lock);

	ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	hash_init(tfile->ref_hash);

	return tfile;
}

struct ttm_object_device *
ttm_object_device_init(const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);

	if (unlikely(tdev == NULL))
		return NULL;

	spin_lock_init(&tdev->object_lock);

	/*
	 * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
	 * a separate namespace for GEM handles (which are
	 * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
	 * can take either handle as an argument so we want to
	 * easily be able to tell whether the handle refers to a
	 * GEM buffer or a surface.
	 */
	idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	return tdev;
}
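
/*
 * Illustrative consequence of the idr base chosen above (not part of the
 * original file): because GEM handles occupy 1..VMWGFX_NUM_MOB and base
 * object handles start at VMWGFX_NUM_MOB + 1, an ioctl that accepts either
 * kind can disambiguate with a simple range check:
 *
 *	if (handle <= VMWGFX_NUM_MOB) {
 *		// handle names a GEM buffer object
 *	} else {
 *		// handle names a base object, e.g. a surface
 *	}
 */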

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
	idr_destroy(&tdev->idr);

	kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return file_ref_get(&dmabuf->file->f_ref);
}
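
/*
 * The pattern above is the file-level analogue of kref_get_unless_zero():
 * a lookup structure (here prime->dma_buf, protected by prime->mutex)
 * holds a pointer without owning a reference, so acquisition must fail
 * once the count has already dropped to zero. Generic shape of the
 * pattern, for illustration:
 *
 *	// under a lock that the release method also takes:
 *	if (obj && !kref_get_unless_zero(&obj->kref))
 *		obj = NULL;	// already being torn down; treat as absent
 */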

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops) {
		dma_buf_put(dma_buf);	/* drop the ref taken by dma_buf_get() */
		return -ENOSYS;
	}

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->handle;
	ret = ttm_ref_object_add(tfile, base, NULL, false);

	dma_buf_put(dma_buf);

	return ret;
}
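
/*
 * Usage sketch (illustrative): an ioctl handler backing a prime-import
 * style interface would wrap this roughly as follows; the argument struct
 * is hypothetical.
 *
 *	struct vmw_example_fd_to_handle_arg *arg = data;
 *	u32 handle;
 *	int ret;
 *
 *	ret = ttm_prime_fd_to_handle(tfile, arg->fd, &handle);
 *	if (ret == 0)
 *		arg->handle = handle;
 *	return ret;
 */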

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf
		 */

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **))
{
	bool shareable = !!(type == VMW_RES_SURFACE);

	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release);
}
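
/*
 * Embedding sketch (illustrative): a prime-capable driver object embeds
 * struct ttm_prime_object, which in turn embeds the base object, so one
 * allocation serves the handle, reference and dma-buf export machinery.
 * The struct below is a simplified, hypothetical stand-in for objects such
 * as vmwgfx user surfaces.
 *
 *	struct vmw_example_prime_obj {
 *		struct ttm_prime_object prime;
 *		// driver-private payload ...
 *	};
 *
 *	ret = ttm_prime_object_init(tfile, size, &obj->prime,
 *				    VMW_RES_SURFACE, vmw_example_release);
 */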
v5.4
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include "ttm_object.h"

struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	spinlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	size_t dma_buf_size;
	struct idr idr;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	idr_preload(GFP_KERNEL);
	spin_lock(&tdev->object_lock);
	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
	spin_unlock(&tdev->object_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	base->handle = ret;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
	return ret;
}

static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
	    container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}

/**
 * ttm_base_object_noref_lookup - look up a base object without reference
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * This function looks up a ttm base object and returns a pointer to it
 * without refcounting the pointer. The returned pointer is only valid
 * until ttm_base_object_noref_release() is called, and the object
 * pointed to by the returned pointer may be doomed. Any persistent usage
 * of the object requires a refcount to be taken using kref_get_unless_zero().
 * Iff this function returns successfully it needs to be paired with
 * ttm_base_object_noref_release(), and no sleeping or scheduling functions
 * may be called in between these function calls.
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);
	if (ret) {
		rcu_read_unlock();
		return NULL;
	}

	__release(RCU);
	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
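
/*
 * Usage sketch (illustrative): as documented above, a successful noref
 * lookup must be paired with ttm_base_object_noref_release() (which
 * re-balances the RCU read-side critical section), with no sleeping in
 * between.
 *
 *	struct ttm_base_object *base;
 *
 *	base = ttm_base_object_noref_lookup(tfile, handle);
 *	if (!base)
 *		return -ESRCH;
 *	// ... short, non-sleeping use of base ...
 *	ttm_base_object_noref_release();
 */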

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base;

	rcu_read_lock();
	base = idr_find(&tdev->idr, key);

	if (base && !kref_get_unless_zero(&base->refcount))
		base = NULL;
	rcu_read_unlock();

	return base;
}

/**
 * ttm_ref_object_exists - Check whether a caller holds a valid ref object
 * on (i.e. has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
			   struct ttm_base_object *base)
{
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	struct drm_hash_item *hash;
	struct ttm_ref_object *ref;

	rcu_read_lock();
	if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
		goto out_false;

	/*
	 * Verify that the ref object is really pointing to our base object.
	 * Our base object could actually be dead, and the ref object pointing
	 * to another base object with the same handle.
	 */
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	if (unlikely(base != ref->obj))
		goto out_false;

	/*
	 * Verify that the ref->obj pointer was actually valid!
	 */
	rmb();
	if (unlikely(kref_read(&ref->kref) == 0))
		goto out_false;

	rcu_read_unlock();
	return true;

 out_false:
	rcu_read_unlock();
	return false;
}
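
/*
 * Usage sketch (illustrative): a typical caller validates that the
 * requesting client actually holds a reference before acting on its
 * behalf.
 *
 *	if (!ttm_ref_object_exists(tfile, base))
 *		return -EPERM;
 */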

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed,
		       bool require_existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret = -EINVAL;

	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = drm_ht_find_item_rcu(ht, base->handle, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   &ctx);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->handle;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		ret = drm_ht_insert_item_rcu(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			spin_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		spin_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	spin_unlock(&tfile->lock);
	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}

struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	idr_init(&tdev->idr);
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
		ttm_round_pot(sizeof(struct file));
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
	idr_destroy(&tdev->idr);
	drm_ht_remove(&tdev->object_hash);

	kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops) {
		dma_buf_put(dma_buf);	/* drop the ref taken by dma_buf_get() */
		return -ENOSYS;
	}

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->handle;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

	dma_buf_put(dma_buf);

	return ret;
}

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false
		};
		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf, with memory accounting.
		 */
		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
					   &ctx);
		if (unlikely(ret != 0)) {
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			ttm_mem_global_free(tdev->mem_glob,
					    tdev->dma_buf_size);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime, bool shareable,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **),
			  void (*ref_obj_release) (struct ttm_base_object *,
						   enum ttm_ref_type ref_type))
{
	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release,
				    ref_obj_release);
}